repo_name (string, 6-100 chars) | path (string, 4-294 chars) | copies (string, 1-5 chars) | size (string, 4-6 chars) | content (string, 606-896k chars) | license (15 classes) | var_hash (int64) | doc_hash (int64) | line_mean (float64, 3.5-99.8) | line_max (int64, 13-999) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|---|
vrieni/orange | Orange/OrangeWidgets/Unsupervised/OWPCA.py | 6 | 21788 | """
<name>PCA</name>
<description>Perform Principal Component Analysis</description>
<contact>ales.erjavec(@ at @)fri.uni-lj.si</contact>
<icon>icons/PCA.svg</icon>
<tags>pca,principal,component,projection</tags>
<priority>3050</priority>
"""
import sys
import numpy as np
from PyQt4.Qwt5 import QwtPlot, QwtPlotCurve, QwtSymbol
from PyQt4.QtCore import pyqtSignal as Signal, pyqtSlot as Slot
import Orange
import Orange.projection.linear as plinear
from OWWidget import *
from OWGraph import OWGraph
import OWGUI
def plot_curve(title=None, pen=None, brush=None, style=QwtPlotCurve.Lines,
symbol=QwtSymbol.Ellipse, legend=True, antialias=True,
auto_scale=True, xaxis=QwtPlot.xBottom, yaxis=QwtPlot.yLeft):
curve = QwtPlotCurve(title or "")
return configure_curve(curve, pen=pen, brush=brush, style=style,
symbol=symbol, legend=legend, antialias=antialias,
auto_scale=auto_scale, xaxis=xaxis, yaxis=yaxis)
def configure_curve(curve, title=None, pen=None, brush=None,
style=QwtPlotCurve.Lines, symbol=QwtSymbol.Ellipse,
legend=True, antialias=True, auto_scale=True,
xaxis=QwtPlot.xBottom, yaxis=QwtPlot.yLeft):
if title is not None:
curve.setTitle(title)
if pen is not None:
curve.setPen(pen)
if brush is not None:
curve.setBrush(brush)
if not isinstance(symbol, QwtSymbol):
symbol_ = QwtSymbol()
symbol_.setStyle(symbol)
symbol = symbol_
curve.setStyle(style)
curve.setSymbol(QwtSymbol(symbol))
curve.setRenderHint(QwtPlotCurve.RenderAntialiased, antialias)
curve.setItemAttribute(QwtPlotCurve.Legend, legend)
curve.setItemAttribute(QwtPlotCurve.AutoScale, auto_scale)
curve.setAxis(xaxis, yaxis)
return curve
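# Usage sketch (illustrative; the pen colour and symbol here are examples and
# mirror how the OWPCA widget below builds its scree-plot curves):
#     curve = plot_curve("Variance", pen=QPen(Qt.red, 2),
#                        symbol=QwtSymbol.NoSymbol)
#     curve.attach(plot)  # `plot` being a QwtPlot / OWGraph instance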
class PlotTool(QObject):
"""
A base class for plot tools that operate on a QwtPlot's canvas
widget by installing themselves as its event filter.
"""
cursor = Qt.ArrowCursor
def __init__(self, parent=None, graph=None):
QObject.__init__(self, parent)
self.__graph = None
self.__oldCursor = None
self.setGraph(graph)
def setGraph(self, graph):
"""
Install this tool to operate on ``graph``.
"""
if self.__graph is graph:
return
if self.__graph is not None:
self.uninstall(self.__graph)
self.__graph = graph
if graph is not None:
self.install(graph)
def graph(self):
return self.__graph
def install(self, graph):
canvas = graph.canvas()
canvas.setMouseTracking(True)
canvas.installEventFilter(self)
canvas.destroyed.connect(self.__on_destroyed)
self.__oldCursor = canvas.cursor()
canvas.setCursor(self.cursor)
def uninstall(self, graph):
canvas = graph.canvas()
canvas.removeEventFilter(self)
canvas.setCursor(self.__oldCursor)
canvas.destroyed.disconnect(self.__on_destroyed)
self.__oldCursor = None
def eventFilter(self, obj, event):
if obj is self.__graph.canvas():
return self.canvasEvent(event)
return False
def canvasEvent(self, event):
"""
Main handler for canvas events.
"""
if event.type() == QEvent.MouseButtonPress:
return self.mousePressEvent(event)
elif event.type() == QEvent.MouseButtonRelease:
return self.mouseReleaseEvent(event)
elif event.type() == QEvent.MouseButtonDblClick:
return self.mouseDoubleClickEvent(event)
elif event.type() == QEvent.MouseMove:
return self.mouseMoveEvent(event)
elif event.type() == QEvent.Leave:
return self.leaveEvent(event)
elif event.type() == QEvent.Enter:
return self.enterEvent(event)
return False
# These are actually event filters (note the return values)
def mousePressEvent(self, event):
return False
def mouseMoveEvent(self, event):
return False
def mouseReleaseEvent(self, event):
return False
def mouseDoubleClickEvent(self, event):
return False
def enterEvent(self, event):
return False
def leaveEvent(self, event):
return False
def keyPressEvent(self, event):
return False
def transform(self, point, xaxis=QwtPlot.xBottom, yaxis=QwtPlot.yLeft):
"""
Transform a QPointF from plot coordinates to canvas local coordinates.
"""
x = self.__graph.transform(xaxis, point.x())
y = self.__graph.transform(yaxis, point.y())
return QPoint(x, y)
def invTransform(self, point, xaxis=QwtPlot.xBottom, yaxis=QwtPlot.yLeft):
"""
Transform a QPoint from canvas local coordinates to plot coordinates.
"""
x = self.__graph.invTransform(xaxis, point.x())
y = self.__graph.invTransform(yaxis, point.y())
return QPointF(x, y)
@Slot()
def __on_destroyed(self, obj):
obj.removeEventFilter(self)
class CutoffControler(PlotTool):
class CutoffCurve(QwtPlotCurve):
pass
cutoffChanged = Signal(float)
cutoffMoved = Signal(float)
cutoffPressed = Signal()
cutoffReleased = Signal()
NoState, Drag = 0, 1
def __init__(self, parent=None, graph=None):
self.__curve = None
self.__range = (0, 1)
self.__cutoff = 0
super(CutoffControler, self).__init__(parent, graph)
self._state = self.NoState
def install(self, graph):
super(CutoffControler, self).install(graph)
assert self.__curve is None
self.__curve = CutoffControler.CutoffCurve("")
configure_curve(self.__curve, symbol=QwtSymbol.NoSymbol, legend=False)
self.__curve.setData([self.__cutoff, self.__cutoff], [0.0, 1.0])
self.__curve.attach(graph)
def uninstall(self, graph):
super(CutoffControler, self).uninstall(graph)
self.__curve.detach()
self.__curve = None
def _toRange(self, value):
minval, maxval = self.__range
return max(min(value, maxval), minval)
def mousePressEvent(self, event):
if event.button() == Qt.LeftButton:
cut = self.invTransform(event.pos()).x()
self.setCutoff(cut)
self.cutoffPressed.emit()
self._state = self.Drag
return True
def mouseMoveEvent(self, event):
if self._state == self.Drag:
cut = self._toRange(self.invTransform(event.pos()).x())
self.setCutoff(cut)
self.cutoffMoved.emit(cut)
else:
cx = self.transform(QPointF(self.cutoff(), 0)).x()
if abs(cx - event.pos().x()) < 2:
self.graph().canvas().setCursor(Qt.SizeHorCursor)
else:
self.graph().canvas().setCursor(self.cursor)
return True
def mouseReleaseEvent(self, event):
if event.button() == Qt.LeftButton and self._state == self.Drag:
cut = self._toRange(self.invTransform(event.pos()).x())
self.setCutoff(cut)
self.cutoffReleased.emit()
self._state = self.NoState
return True
def setCutoff(self, cutoff):
minval, maxval = self.__range
cutoff = max(min(cutoff, maxval), minval)
if self.__cutoff != cutoff:
self.__cutoff = cutoff
if self.__curve is not None:
self.__curve.setData([cutoff, cutoff], [0.0, 1.0])
self.cutoffChanged.emit(cutoff)
if self.graph() is not None:
self.graph().replot()
def cutoff(self):
return self.__cutoff
def setRange(self, minval, maxval):
maxval = max(minval, maxval)
if self.__range != (minval, maxval):
self.__range = (minval, maxval)
self.setCutoff(max(min(self.cutoff(), maxval), minval))
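# Wiring sketch (this is how the OWPCA widget below drives the controller;
# `plot`, `n_components` and `on_cutoff_moved` stand in for the widget's own
# objects):
#     tool = CutoffControler(parent=plot.canvas())
#     tool.cutoffMoved.connect(on_cutoff_moved)
#     tool.setGraph(plot)
#     tool.setRange(0, n_components - 1)
#     tool.setCutoff(cutoff + 0.5)  # as done in update_cutoff_curve()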
class Graph(OWGraph):
def __init__(self, *args, **kwargs):
super(Graph, self).__init__(*args, **kwargs)
self.gridCurve.attach(self)
# bypass the OWGraph event handlers
def mousePressEvent(self, event):
QwtPlot.mousePressEvent(self, event)
def mouseMoveEvent(self, event):
QwtPlot.mouseMoveEvent(self, event)
def mouseReleaseEvent(self, event):
QwtPlot.mouseReleaseEvent(self, event)
class OWPCA(OWWidget):
settingsList = ["standardize", "max_components", "variance_covered",
"use_generalized_eigenvectors", "auto_commit"]
def __init__(self, parent=None, signalManager=None, title="PCA"):
OWWidget.__init__(self, parent, signalManager, title, wantGraph=True)
self.inputs = [("Input Data", Orange.data.Table, self.set_data)]
self.outputs = [("Transformed Data", Orange.data.Table, Default),
("Eigen Vectors", Orange.data.Table)]
self.standardize = True
self.max_components = 0
self.variance_covered = 100.0
self.use_generalized_eigenvectors = False
self.auto_commit = False
self.loadSettings()
self.data = None
self.changed_flag = False
#####
# GUI
#####
grid = QGridLayout()
box = OWGUI.widgetBox(self.controlArea, "Components Selection",
orientation=grid)
label1 = QLabel("Max components", box)
grid.addWidget(label1, 1, 0)
sb1 = OWGUI.spin(box, self, "max_components", 0, 1000,
tooltip="Maximum number of components",
callback=self.on_update,
addToLayout=False,
keyboardTracking=False
)
self.max_components_spin = sb1.control
self.max_components_spin.setSpecialValueText("All")
grid.addWidget(sb1.control, 1, 1)
label2 = QLabel("Variance covered", box)
grid.addWidget(label2, 2, 0)
sb2 = OWGUI.doubleSpin(box, self, "variance_covered", 1.0, 100.0, 1.0,
tooltip="Percent of variance covered.",
callback=self.on_update,
decimals=1,
addToLayout=False,
keyboardTracking=False
)
sb2.control.setSuffix("%")
grid.addWidget(sb2.control, 2, 1)
OWGUI.rubber(self.controlArea)
box = OWGUI.widgetBox(self.controlArea, "Commit")
cb = OWGUI.checkBox(box, self, "auto_commit", "Commit on any change")
b = OWGUI.button(box, self, "Commit",
callback=self.update_components)
OWGUI.setStopper(self, b, cb, "changed_flag", self.update_components)
self.plot = Graph()
canvas = self.plot.canvas()
canvas.setFrameStyle(QFrame.StyledPanel)
self.mainArea.layout().addWidget(self.plot)
self.plot.setAxisTitle(QwtPlot.yLeft, "Proportion of Variance")
self.plot.setAxisTitle(QwtPlot.xBottom, "Principal Components")
self.plot.setAxisScale(QwtPlot.yLeft, 0.0, 1.0)
self.plot.enableGridXB(True)
self.plot.enableGridYL(True)
self.plot.setGridColor(Qt.lightGray)
self.variance_curve = plot_curve(
"Variance",
pen=QPen(Qt.red, 2),
symbol=QwtSymbol.NoSymbol,
xaxis=QwtPlot.xBottom,
yaxis=QwtPlot.yLeft
)
self.cumulative_variance_curve = plot_curve(
"Cumulative Variance",
pen=QPen(Qt.darkYellow, 2),
symbol=QwtSymbol.NoSymbol,
xaxis=QwtPlot.xBottom,
yaxis=QwtPlot.yLeft
)
self.variance_curve.attach(self.plot)
self.cumulative_variance_curve.attach(self.plot)
self.selection_tool = CutoffControler(parent=self.plot.canvas())
self.selection_tool.cutoffMoved.connect(self.on_cutoff_moved)
self.graphButton.clicked.connect(self.saveToFile)
self.components = None
self.variances = None
self.variances_cumsum = None
self.projector_full = None
self.currently_selected = 0
self.resize(800, 400)
def clear(self):
"""
Clear (reset) the widget state.
"""
self.data = None
self.selection_tool.setGraph(None)
self.clear_cached()
self.variance_curve.setVisible(False)
self.cumulative_variance_curve.setVisible(False)
def clear_cached(self):
"""Clear cached components
"""
self.components = None
self.variances = None
self.variances_cumsum = None
self.projector_full = None
self.currently_selected = 0
def set_data(self, data=None):
"""Set the widget input data.
"""
self.clear()
if data is not None:
self.data = data
self.on_change()
else:
self.send("Transformed Data", None)
self.send("Eigen Vectors", None)
def on_change(self):
"""Data has changed and we need to recompute the projection.
"""
if self.data is None:
return
self.clear_cached()
self.apply()
def on_update(self):
"""Component selection was changed by the user.
"""
if self.data is None:
return
self.update_cutoff_curve()
if self.currently_selected != self.number_of_selected_components():
self.update_components_if()
def construct_pca_all_comp(self):
pca = plinear.PCA(standardize=self.standardize,
max_components=0,
variance_covered=1,
use_generalized_eigenvectors=self.use_generalized_eigenvectors
)
return pca
def construct_pca(self):
max_components = self.max_components
variance_covered = self.variance_covered
pca = plinear.PCA(standardize=self.standardize,
max_components=max_components,
variance_covered=variance_covered / 100.0,
use_generalized_eigenvectors=self.use_generalized_eigenvectors
)
return pca
def apply(self):
"""
Apply PCA on input data, caching the full projection and
updating the selected components.
"""
pca = self.construct_pca_all_comp()
self.projector_full = pca(self.data)
self.variances = self.projector_full.variances
self.variances /= np.sum(self.variances)
self.variances_cumsum = np.cumsum(self.variances)
self.max_components_spin.setRange(0, len(self.variances))
self.max_components = min(self.max_components,
len(self.variances) - 1)
self.update_scree_plot()
self.update_cutoff_curve()
self.update_components_if()
def update_components_if(self):
if self.auto_commit:
self.update_components()
else:
self.changed_flag = True
def update_components(self):
"""Update the output components.
"""
if self.data is None:
return
scale = self.projector_full.scale
center = self.projector_full.center
components = self.projector_full.projection
input_domain = self.projector_full.input_domain
variances = self.projector_full.variances
# Get selected components (based on max_components and
# variance_covered)
pca = self.construct_pca()
variances, components, variance_sum = pca._select_components(variances, components)
projector = plinear.PcaProjector(input_domain=input_domain,
standardize=self.standardize,
scale=scale,
center=center,
projection=components,
variances=variances,
variance_sum=variance_sum)
projected_data = projector(self.data)
append_metas(projected_data, self.data)
eigenvectors = self.eigenvectors_as_table(components)
self.currently_selected = self.number_of_selected_components()
self.send("Transformed Data", projected_data)
self.send("Eigen Vectors", eigenvectors)
self.changed_flag = False
def eigenvectors_as_table(self, U):
features = [Orange.feature.Continuous("C%i" % i) \
for i in range(1, U.shape[1] + 1)]
domain = Orange.data.Domain(features, False)
return Orange.data.Table(domain, [list(v) for v in U])
def update_scree_plot(self):
x_space = np.arange(0, len(self.variances))
self.plot.enableAxis(QwtPlot.xBottom, True)
self.plot.enableAxis(QwtPlot.yLeft, True)
if len(x_space) <= 5:
self.plot.setXlabels(["PC" + str(i + 1) for i in x_space])
else:
# Restore continuous plot scale
# TODO: disable minor ticks
self.plot.setXlabels(None)
self.variance_curve.setData(x_space, self.variances)
self.cumulative_variance_curve.setData(x_space, self.variances_cumsum)
self.variance_curve.setVisible(True)
self.cumulative_variance_curve.setVisible(True)
self.selection_tool.setRange(0, len(self.variances) - 1)
self.selection_tool.setGraph(self.plot)
self.plot.replot()
def on_cutoff_moved(self, value):
"""Cutoff curve was moved by the user.
"""
components = int(np.floor(value)) + 1
# Did the number of components actually change
self.max_components = components
self.variance_covered = self.variances_cumsum[components - 1] * 100
if self.currently_selected != self.number_of_selected_components():
self.update_components_if()
def update_cutoff_curve(self):
"""Update cutoff curve from 'Components Selection' control box.
"""
if self.max_components == 0:
# Special "All" value
max_components = len(self.variances_cumsum)
else:
max_components = self.max_components
variance = self.variances_cumsum[max_components - 1] * 100.0
if variance < self.variance_covered:
cutoff = max_components - 1
else:
cutoff = np.searchsorted(self.variances_cumsum,
self.variance_covered / 100.0)
self.selection_tool.setCutoff(float(cutoff + 0.5))
def number_of_selected_components(self):
"""How many components are selected.
"""
if self.data is None:
return 0
variance_components = np.searchsorted(self.variances_cumsum,
self.variance_covered / 100.0)
if self.max_components == 0:
# Special "All" value
max_components = len(self.variances_cumsum)
else:
max_components = self.max_components
return min(variance_components + 1, max_components)
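# Worked example (illustrative numbers only): with
# variances_cumsum = [0.6, 0.85, 0.95, 1.0] and variance_covered = 90,
# np.searchsorted(variances_cumsum, 0.9) returns 2, so
# min(2 + 1, max_components) = 3 components are reported as selected,
# unless the "Max components" spin box caps the count lower.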
def sendReport(self):
self.reportSettings("PCA Settings",
[("Max. components", self.max_components),
("Variance covered", "%i%%" % self.variance_covered),
])
if self.data is not None and self.projector_full:
output_domain = self.projector_full.output_domain
st_dev = np.sqrt(self.projector_full.variances)
summary = [[""] + [a.name for a in output_domain.attributes],
["Std. deviation"] + ["%.3f" % sd for sd in st_dev],
["Proportion Var"] + ["%.3f" % v for v in self.variances * 100.0],
["Cumulative Var"] + ["%.3f" % v for v in self.variances_cumsum * 100.0]
]
th = "<th>%s</th>".__mod__
header = "".join(map(th, summary[0]))
td = "<td>%s</td>".__mod__
summary = ["".join(map(td, row)) for row in summary[1:]]
tr = "<tr>%s</tr>".__mod__
summary = "\n".join(map(tr, [header] + summary))
summary = "<table>\n%s\n</table>" % summary
self.reportSection("Summary")
self.reportRaw(summary)
self.reportSection("Scree Plot")
self.reportImage(self.plot.saveToFileDirect)
def saveToFile(self):
self.plot.saveToFile()
def append_metas(dest, source):
"""
Append all meta attributes from the `source` table to `dest` table.
The tables must be of the same length.
:param dest:
A data table into which the meta values will be copied.
:type dest: :class:`Orange.data.Table`
:param source:
A data table with the meta attributes/values to be copied into `dest`.
:type source: :class:`Orange.data.Table`
"""
if len(dest) != len(source):
raise ValueError("'dest' and 'source' must have the same length.")
dest.domain.add_metas(source.domain.get_metas())
for dest_inst, source_inst in zip(dest, source):
for meta_id, val in source_inst.get_metas().items():
dest_inst[meta_id] = val
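# Example (sketch; both arguments must be Orange.data.Table objects of equal
# length, as update_components() above guarantees):
#     projected = projector(data)
#     append_metas(projected, data)  # copies data's meta columns onto projected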
if __name__ == "__main__":
app = QApplication(sys.argv)
w = OWPCA()
data = Orange.data.Table("iris")
w.set_data(data)
w.show()
w.set_data(Orange.data.Table("brown-selected"))
app.exec_()
| gpl-3.0 | -7,858,153,206,118,274,000 | 6,343,273,601,756,436,000 | 33.097027 | 95 | 0.583624 | false |
redhat-openstack/horizon | openstack_dashboard/test/test_data/utils.py | 14 | 4574 | # Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
def load_test_data(load_onto=None):
from openstack_dashboard.test.test_data import ceilometer_data
from openstack_dashboard.test.test_data import cinder_data
from openstack_dashboard.test.test_data import exceptions
from openstack_dashboard.test.test_data import glance_data
from openstack_dashboard.test.test_data import heat_data
from openstack_dashboard.test.test_data import keystone_data
from openstack_dashboard.test.test_data import neutron_data
from openstack_dashboard.test.test_data import nova_data
from openstack_dashboard.test.test_data import swift_data
# The order of these loaders matters, some depend on others.
loaders = (
exceptions.data,
keystone_data.data,
glance_data.data,
nova_data.data,
cinder_data.data,
neutron_data.data,
swift_data.data,
heat_data.data,
ceilometer_data.data,
)
if load_onto:
for data_func in loaders:
data_func(load_onto)
return load_onto
else:
return TestData(*loaders)
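# Example (sketch): build a standalone fixture, or load the same data onto an
# existing object:
#     TEST = load_test_data()              # returns a new TestData instance
#     load_test_data(load_onto=existing)   # mutates `existing` and returns it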
class TestData(object):
"""Holder object for test data. Any functions passed to the init method
will be called with the ``TestData`` object as their only argument. They
can then load data onto the object as desired.
The idea is to use the instantiated object like this::
>>> import glance_data
>>> TEST = TestData(glance_data.data)
>>> TEST.images.list()
[<Image: visible_image>, <Image: invisible_image>]
>>> TEST.images.first()
<Image: visible_image>
You can load as little or as much data as you like as long as the loaders
don't conflict with each other.
See the
:class:`~openstack_dashboard.test.test_data.utils.TestDataContainer`
class for a list of available methods.
"""
def __init__(self, *args):
for data_func in args:
data_func(self)
class TestDataContainer(object):
"""A container for test data objects.
The behavior of this class is meant to mimic a "manager" class, which
has convenient shortcuts for common actions like "list", "filter", "get",
and "add".
"""
def __init__(self):
self._objects = []
def add(self, *args):
"""Add a new object to this container.
Generally this method should only be used during data loading, since
adding data during a test can affect the results of other tests.
"""
for obj in args:
if obj not in self._objects:
self._objects.append(obj)
def list(self):
"""Returns a list of all objects in this container."""
return self._objects
def filter(self, filtered=None, **kwargs):
"""Returns objects in this container whose attributes match the given
keyword arguments.
"""
if filtered is None:
filtered = self._objects
try:
key, value = kwargs.popitem()
except KeyError:
# We're out of filters, return
return filtered
def get_match(obj):
return hasattr(obj, key) and getattr(obj, key) == value
filtered = [obj for obj in filtered if get_match(obj)]
return self.filter(filtered=filtered, **kwargs)
def get(self, **kwargs):
"""Returns the single object in this container whose attributes match
the given keyword arguments. An error will be raised if the arguments
provided don't return exactly one match.
"""
matches = self.filter(**kwargs)
if not matches:
raise Exception("No matches found.")
elif len(matches) > 1:
raise Exception("Multiple matches found.")
else:
return matches.pop()
def first(self):
"""Returns the first object from this container."""
return self._objects[0]
def count(self):
return len(self._objects)
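# Illustrative usage of TestDataContainer (the objects here are hypothetical,
# not ones created by the loaders above):
#     container = TestDataContainer()
#     container.add(server_a, server_b)
#     container.list()                   # [server_a, server_b]
#     container.filter(status='ACTIVE')  # narrows by one keyword at a time
#     container.get(id=1)                # exactly one match, else Exception
#     container.first()                  # server_a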
| apache-2.0 | -2,919,035,937,239,980,000 | -2,825,560,236,251,147,000 | 33.390977 | 78 | 0.643419 | false |
dbentley/pants | contrib/android/tests/python/pants_test/contrib/android/tasks/test_aapt_gen_integration.py | 14 | 4947 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import re
import unittest
from pants_test.contrib.android.android_integration_test import AndroidIntegrationTest
class AaptGenIntegrationTest(AndroidIntegrationTest):
"""Integration test for AaptGen
The Android SDK is modular; finding an SDK on the PATH is no guarantee that there is
a particular aapt binary on disk. TOOLS lists the binaries required by the target in the
'test_aapt_gen' method. If you add a target, you may need to expand the TOOLS list and
perhaps define new BUILD_TOOLS or TARGET_SDK class variables.
"""
TOOLS = [
os.path.join('build-tools', AndroidIntegrationTest.BUILD_TOOLS, 'aapt'),
os.path.join('platforms', 'android-' + AndroidIntegrationTest.TARGET_SDK, 'android.jar')
]
tools = AndroidIntegrationTest.requirements(TOOLS)
def aapt_gen_test(self, target):
pants_run = self.run_pants(['gen', target])
self.assert_success(pants_run)
@unittest.skipUnless(tools, reason='Android integration test requires tools {0!r} '
'and ANDROID_HOME set in path.'.format(TOOLS))
def test_aapt_gen(self):
self.aapt_gen_test(AndroidIntegrationTest.TEST_TARGET)
@unittest.skipUnless(tools, reason='Android integration test requires tools {0!r} '
'and ANDROID_HOME set in path.'.format(TOOLS))
# TODO(mateor) Write a testproject instead of using hello_with_library which may change.
def test_android_library_dep(self):
# Doing the work under a tempdir gives us a handle for the workdir and guarantees a clean build.
with self.temporary_workdir() as workdir:
spec = 'contrib/android/examples/src/android/hello_with_library:'
pants_run = self.run_pants_with_workdir(['gen', '-ldebug', spec], workdir)
self.assert_success(pants_run)
# Make sure that the R.java was produced for the binary and its library dependency.
lib_file = 'gen/aapt/21/org/pantsbuild/examples/example_library/R.java'
apk_file = 'gen/aapt/21/org/pantsbuild/examples/hello_with_library/R.java'
self.assertTrue(os.path.isfile(os.path.join(workdir, lib_file)))
self.assertTrue(os.path.isfile(os.path.join(workdir, apk_file)))
# Scrape debug statements.
def find_aapt_blocks(lines):
for line in lines:
if re.search(r'Executing: .*?\baapt', line):
yield line
aapt_blocks = list(find_aapt_blocks(pants_run.stderr_data.split('\n')))
# Pulling in google-play-services-v21 from the SDK brings in 20 .aar libraries of which only 6
# have resources. Add 2 for android_binary and android_library targets = 8 total invocations.
self.assertEquals(len(aapt_blocks), 8, 'Expected eight invocations of the aapt tool!'
'(was :{})\n{}'.format(len(aapt_blocks),
pants_run.stderr_data))
# Check to make sure the resources are being passed in correct order (apk->libs).
for line in aapt_blocks:
apk = re.search(r'hello_with_library.*?\b', line)
library = re.search(r'contrib/android/examples/src/android/example_library/AndroidManifest.*?\b', line)
resource_dirs = re.findall(r'-S ([^\s]+)', line)
if apk:
# The order of resource directories should mirror the dependencies. The dependency order
# is hello_with_library -> example_library -> gms-library.
self.assertEqual(resource_dirs[0], 'contrib/android/examples/src/android/hello_with_library/main/res')
self.assertEqual(resource_dirs[1], 'contrib/android/examples/src/android/example_library/res')
self.assertEqual(len(resource_dirs), 8, 'Expected eight resource dirs to be included '
'when calling aapt on hello_with_library apk. '
'(was: {})\n'.format(len(resource_dirs)))
elif library:
# The seven invocations are the example_library and the 6 gms dependencies.
self.assertEqual(len(resource_dirs), 7, 'Expected seven resource dir to be included '
'when calling aapt on example_library dep. '
'(was: {})\n'.format(len(resource_dirs)))
else:
self.assertEqual(len(resource_dirs), 1, 'Expected one resource dir to be included when '
'calling aapt on each gms-library dep. '
'(was: {})\n'.format(len(resource_dirs)))
| apache-2.0 | 2,758,327,231,096,735,000 | 4,947,911,106,256,531,000 | 52.771739 | 112 | 0.634122 | false |
codeforamerica/skillcamp | ENV/lib/python2.7/site-packages/sqlalchemy/event/registry.py | 77 | 7468 | # event/registry.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Provides managed registration services on behalf of :func:`.listen`
arguments.
By "managed registration", we mean that event listening functions and
other objects can be added to various collections in such a way that their
membership in all those collections can be revoked at once, based on
an equivalent :class:`._EventKey`.
"""
from __future__ import absolute_import
import weakref
import collections
import types
from .. import exc, util
_key_to_collection = collections.defaultdict(dict)
"""
Given an original listen() argument, can locate all
listener collections and the listener fn contained
(target, identifier, fn) -> {
ref(listenercollection) -> ref(listener_fn)
ref(listenercollection) -> ref(listener_fn)
ref(listenercollection) -> ref(listener_fn)
}
"""
_collection_to_key = collections.defaultdict(dict)
"""
Given a _ListenerCollection or _DispatchDescriptor, can locate
all the original listen() arguments and the listener fn contained
ref(listenercollection) -> {
ref(listener_fn) -> (target, identifier, fn),
ref(listener_fn) -> (target, identifier, fn),
ref(listener_fn) -> (target, identifier, fn),
}
"""
def _collection_gced(ref):
# defaultdict, so can't get a KeyError
if not _collection_to_key or ref not in _collection_to_key:
return
listener_to_key = _collection_to_key.pop(ref)
for key in listener_to_key.values():
if key in _key_to_collection:
# defaultdict, so can't get a KeyError
dispatch_reg = _key_to_collection[key]
dispatch_reg.pop(ref)
if not dispatch_reg:
_key_to_collection.pop(key)
def _stored_in_collection(event_key, owner):
key = event_key._key
dispatch_reg = _key_to_collection[key]
owner_ref = owner.ref
listen_ref = weakref.ref(event_key._listen_fn)
if owner_ref in dispatch_reg:
assert dispatch_reg[owner_ref] == listen_ref
else:
dispatch_reg[owner_ref] = listen_ref
listener_to_key = _collection_to_key[owner_ref]
listener_to_key[listen_ref] = key
def _removed_from_collection(event_key, owner):
key = event_key._key
dispatch_reg = _key_to_collection[key]
listen_ref = weakref.ref(event_key._listen_fn)
owner_ref = owner.ref
dispatch_reg.pop(owner_ref, None)
if not dispatch_reg:
del _key_to_collection[key]
if owner_ref in _collection_to_key:
listener_to_key = _collection_to_key[owner_ref]
listener_to_key.pop(listen_ref)
def _stored_in_collection_multi(newowner, oldowner, elements):
if not elements:
return
oldowner = oldowner.ref
newowner = newowner.ref
old_listener_to_key = _collection_to_key[oldowner]
new_listener_to_key = _collection_to_key[newowner]
for listen_fn in elements:
listen_ref = weakref.ref(listen_fn)
key = old_listener_to_key[listen_ref]
dispatch_reg = _key_to_collection[key]
if newowner in dispatch_reg:
assert dispatch_reg[newowner] == listen_ref
else:
dispatch_reg[newowner] = listen_ref
new_listener_to_key[listen_ref] = key
def _clear(owner, elements):
if not elements:
return
owner = owner.ref
listener_to_key = _collection_to_key[owner]
for listen_fn in elements:
listen_ref = weakref.ref(listen_fn)
key = listener_to_key[listen_ref]
dispatch_reg = _key_to_collection[key]
dispatch_reg.pop(owner, None)
if not dispatch_reg:
del _key_to_collection[key]
class _EventKey(object):
"""Represent :func:`.listen` arguments.
"""
def __init__(self, target, identifier, fn, dispatch_target, _fn_wrap=None):
self.target = target
self.identifier = identifier
self.fn = fn
if isinstance(fn, types.MethodType):
self.fn_key = id(fn.__func__), id(fn.__self__)
else:
self.fn_key = id(fn)
self.fn_wrap = _fn_wrap
self.dispatch_target = dispatch_target
@property
def _key(self):
return (id(self.target), self.identifier, self.fn_key)
def with_wrapper(self, fn_wrap):
if fn_wrap is self._listen_fn:
return self
else:
return _EventKey(
self.target,
self.identifier,
self.fn,
self.dispatch_target,
_fn_wrap=fn_wrap
)
def with_dispatch_target(self, dispatch_target):
if dispatch_target is self.dispatch_target:
return self
else:
return _EventKey(
self.target,
self.identifier,
self.fn,
dispatch_target,
_fn_wrap=self.fn_wrap
)
def listen(self, *args, **kw):
once = kw.pop("once", False)
if once:
self.with_wrapper(util.only_once(self._listen_fn)).listen(*args, **kw)
else:
self.dispatch_target.dispatch._listen(self, *args, **kw)
def remove(self):
key = self._key
if key not in _key_to_collection:
raise exc.InvalidRequestError(
"No listeners found for event %s / %r / %s " %
(self.target, self.identifier, self.fn)
)
dispatch_reg = _key_to_collection.pop(key)
for collection_ref, listener_ref in dispatch_reg.items():
collection = collection_ref()
listener_fn = listener_ref()
if collection is not None and listener_fn is not None:
collection.remove(self.with_wrapper(listener_fn))
def contains(self):
"""Return True if this event key is registered to listen.
"""
return self._key in _key_to_collection
def base_listen(self, propagate=False, insert=False,
named=False):
target, identifier, fn = \
self.dispatch_target, self.identifier, self._listen_fn
dispatch_descriptor = getattr(target.dispatch, identifier)
fn = dispatch_descriptor._adjust_fn_spec(fn, named)
self = self.with_wrapper(fn)
if insert:
dispatch_descriptor.\
for_modify(target.dispatch).insert(self, propagate)
else:
dispatch_descriptor.\
for_modify(target.dispatch).append(self, propagate)
@property
def _listen_fn(self):
return self.fn_wrap or self.fn
def append_value_to_list(self, owner, list_, value):
_stored_in_collection(self, owner)
list_.append(value)
def append_to_list(self, owner, list_):
_stored_in_collection(self, owner)
list_.append(self._listen_fn)
def remove_from_list(self, owner, list_):
_removed_from_collection(self, owner)
list_.remove(self._listen_fn)
def prepend_to_list(self, owner, list_):
_stored_in_collection(self, owner)
list_.insert(0, self._listen_fn)
| mit | -7,251,328,826,001,116,000 | 4,341,488,939,328,534,000 | 29.987552 | 84 | 0.597215 | false |
ShineFan/odoo | addons/report_intrastat/__init__.py | 377 | 1079 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import report_intrastat
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -8,285,135,507,165,092,000 | -4,681,458,531,268,924,000 | 43.958333 | 79 | 0.611677 | false |
vipulkanade/EventbriteDjango | lib/python2.7/site-packages/requests/packages/chardet/codingstatemachine.py | 2931 | 2318 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .constants import eStart
from .compat import wrap_ord
class CodingStateMachine:
def __init__(self, sm):
self._mModel = sm
self._mCurrentBytePos = 0
self._mCurrentCharLen = 0
self.reset()
def reset(self):
self._mCurrentState = eStart
def next_state(self, c):
# for each byte we get its class
# if it is first byte, we also get byte length
# PY3K: aBuf is a byte stream, so c is an int, not a byte
byteCls = self._mModel['classTable'][wrap_ord(c)]
if self._mCurrentState == eStart:
self._mCurrentBytePos = 0
self._mCurrentCharLen = self._mModel['charLenTable'][byteCls]
# from byte's class and stateTable, we get its next state
curr_state = (self._mCurrentState * self._mModel['classFactor']
+ byteCls)
self._mCurrentState = self._mModel['stateTable'][curr_state]
self._mCurrentBytePos += 1
return self._mCurrentState
def get_current_charlen(self):
return self._mCurrentCharLen
def get_coding_state_machine(self):
return self._mModel['name']
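# Driver sketch (mirrors how chardet's multi-byte probers consume this class;
# `sm_model` stands for one of the state-machine description dicts, e.g. from
# mbcssm.py):
#     machine = CodingStateMachine(sm_model)
#     machine.reset()
#     for byte in data:
#         state = machine.next_state(byte)
#         # callers then compare `state` with the eError / eItsMe constants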
| mit | -5,708,649,235,561,999,000 | -8,388,502,364,514,044,000 | 37 | 73 | 0.656601 | false |
karthikvadla16/spark-tk | python/sparktk/graph/ops/betweenness_centrality.py | 7 | 2366 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
def betweenness_centrality(self, edge_weight=None, normalize=True):
"""
**Betweenness Centrality**
Calculates the betweenness centrality exactly, with an optional weights parameter
for the distance between the vertices.
Parameters
----------
:param edge_weight: (Optional(str)) The name of the column containing the edge weights.
If None, every edge is assigned a weight of 1.
:param normalize: (Optional(bool)) If true, normalize the betweenness centrality values
by the number of pairwise paths possible
:return: (Frame) Frame containing the vertex IDs and their corresponding betweenness centrality value
Examples
--------
>>> vertex_schema = [('id', int)]
>>> edge_schema = [('src', int), ('dst', int)]
>>> vertex_rows = [ [1], [2], [3], [4], [5] ]
>>> edge_rows = [ [1, 2], [1, 3], [2, 3], [1, 4], [4, 5] ]
>>> vertex_frame = tc.frame.create(vertex_rows, vertex_schema)
>>> edge_frame = tc.frame.create(edge_rows, edge_schema)
>>> graph = tc.graph.create(vertex_frame, edge_frame)
>>> result = graph.betweenness_centrality()
>>> result.inspect()
[#] id betweenness_centrality
===============================
[0] 1 0.666666666667
[1] 2 0.0
[2] 3 0.0
[3] 4 0.5
[4] 5 0.0
"""
from sparktk.frame.frame import Frame
return Frame(self._tc, self._scala.betweennessCentrality(self._tc.jutils.convert.to_scala_option(edge_weight), normalize))
| apache-2.0 | -3,666,440,878,422,557,000 | -3,227,415,048,412,446,000 | 34.75 | 126 | 0.60271 | false |
citrix-openstack-build/python-novaclient | novaclient/v1_1/client.py | 7 | 7348 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from novaclient import client
from novaclient.v1_1 import agents
from novaclient.v1_1 import certs
from novaclient.v1_1 import cloudpipe
from novaclient.v1_1 import aggregates
from novaclient.v1_1 import availability_zones
from novaclient.v1_1 import coverage_ext
from novaclient.v1_1 import flavors
from novaclient.v1_1 import flavor_access
from novaclient.v1_1 import floating_ip_dns
from novaclient.v1_1 import floating_ips
from novaclient.v1_1 import floating_ip_pools
from novaclient.v1_1 import fping
from novaclient.v1_1 import hosts
from novaclient.v1_1 import hypervisors
from novaclient.v1_1 import images
from novaclient.v1_1 import keypairs
from novaclient.v1_1 import limits
from novaclient.v1_1 import networks
from novaclient.v1_1 import quota_classes
from novaclient.v1_1 import quotas
from novaclient.v1_1 import security_group_rules
from novaclient.v1_1 import security_groups
from novaclient.v1_1 import servers
from novaclient.v1_1 import usage
from novaclient.v1_1 import virtual_interfaces
from novaclient.v1_1 import volumes
from novaclient.v1_1 import volume_snapshots
from novaclient.v1_1 import volume_types
from novaclient.v1_1 import services
from novaclient.v1_1 import fixed_ips
from novaclient.v1_1 import floating_ips_bulk
class Client(object):
"""
Top-level object to access the OpenStack Compute API.
Create an instance with your creds::
>>> client = Client(USERNAME, PASSWORD, PROJECT_ID, AUTH_URL)
Then call methods on its managers::
>>> client.servers.list()
...
>>> client.flavors.list()
...
"""
# FIXME(jesse): project_id isn't required to authenticate
def __init__(self, username, api_key, project_id, auth_url=None,
insecure=False, timeout=None, proxy_tenant_id=None,
proxy_token=None, region_name=None,
endpoint_type='publicURL', extensions=None,
service_type='compute', service_name=None,
volume_service_name=None, timings=False,
bypass_url=None, os_cache=False, no_cache=True,
http_log_debug=False, auth_system='keystone',
auth_plugin=None,
cacert=None, tenant_id=None):
# FIXME(comstud): Rename the api_key argument above when we
# know it's not being used as keyword argument
password = api_key
self.projectid = project_id
self.tenant_id = tenant_id
self.flavors = flavors.FlavorManager(self)
self.flavor_access = flavor_access.FlavorAccessManager(self)
self.images = images.ImageManager(self)
self.limits = limits.LimitsManager(self)
self.servers = servers.ServerManager(self)
# extensions
self.agents = agents.AgentsManager(self)
self.dns_domains = floating_ip_dns.FloatingIPDNSDomainManager(self)
self.dns_entries = floating_ip_dns.FloatingIPDNSEntryManager(self)
self.cloudpipe = cloudpipe.CloudpipeManager(self)
self.certs = certs.CertificateManager(self)
self.floating_ips = floating_ips.FloatingIPManager(self)
self.floating_ip_pools = floating_ip_pools.FloatingIPPoolManager(self)
self.fping = fping.FpingManager(self)
self.volumes = volumes.VolumeManager(self)
self.volume_snapshots = volume_snapshots.SnapshotManager(self)
self.volume_types = volume_types.VolumeTypeManager(self)
self.keypairs = keypairs.KeypairManager(self)
self.networks = networks.NetworkManager(self)
self.quota_classes = quota_classes.QuotaClassSetManager(self)
self.quotas = quotas.QuotaSetManager(self)
self.security_groups = security_groups.SecurityGroupManager(self)
self.security_group_rules = \
security_group_rules.SecurityGroupRuleManager(self)
self.usage = usage.UsageManager(self)
self.virtual_interfaces = \
virtual_interfaces.VirtualInterfaceManager(self)
self.aggregates = aggregates.AggregateManager(self)
self.hosts = hosts.HostManager(self)
self.hypervisors = hypervisors.HypervisorManager(self)
self.services = services.ServiceManager(self)
self.fixed_ips = fixed_ips.FixedIPsManager(self)
self.floating_ips_bulk = floating_ips_bulk.FloatingIPBulkManager(self)
self.os_cache = os_cache or not no_cache
self.coverage = coverage_ext.CoverageManager(self)
self.availability_zones = \
availability_zones.AvailabilityZoneManager(self)
# Add in any extensions...
if extensions:
for extension in extensions:
if extension.manager_class:
setattr(self, extension.name,
extension.manager_class(self))
self.client = client.HTTPClient(username,
password,
projectid=project_id,
tenant_id=tenant_id,
auth_url=auth_url,
insecure=insecure,
timeout=timeout,
auth_system=auth_system,
auth_plugin=auth_plugin,
proxy_token=proxy_token,
proxy_tenant_id=proxy_tenant_id,
region_name=region_name,
endpoint_type=endpoint_type,
service_type=service_type,
service_name=service_name,
volume_service_name=volume_service_name,
timings=timings,
bypass_url=bypass_url,
os_cache=self.os_cache,
http_log_debug=http_log_debug,
cacert=cacert)
def set_management_url(self, url):
self.client.set_management_url(url)
def get_timings(self):
return self.client.get_timings()
def reset_timings(self):
self.client.reset_timings()
def authenticate(self):
"""
Authenticate against the server.
Normally this is called automatically when you first access the API,
but you can call this method to force authentication right now.
Returns on success; raises :exc:`exceptions.Unauthorized` if the
credentials are wrong.
"""
self.client.authenticate()
| apache-2.0 | -7,337,227,513,012,668,000 | -5,175,212,400,361,028,000 | 41.97076 | 78 | 0.632009 | false |
waterponey/scikit-learn | examples/semi_supervised/plot_label_propagation_structure.py | 55 | 2433 | """
==============================================
Label Propagation learning a complex structure
==============================================
Example of LabelPropagation learning a complex internal structure
to demonstrate "manifold learning". The outer circle should be
labeled "red" and the inner circle "blue". Because both label groups
lie inside their own distinct shape, we can see that the labels
propagate correctly around the circle.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# Andreas Mueller <[email protected]>
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from sklearn.semi_supervised import label_propagation
from sklearn.datasets import make_circles
# generate ring with inner box
n_samples = 200
X, y = make_circles(n_samples=n_samples, shuffle=False)
outer, inner = 0, 1
labels = -np.ones(n_samples)
labels[0] = outer
labels[-1] = inner
###############################################################################
# Learn with LabelSpreading
label_spread = label_propagation.LabelSpreading(kernel='knn', alpha=1.0)
label_spread.fit(X, labels)
###############################################################################
# Plot output labels
output_labels = label_spread.transduction_
plt.figure(figsize=(8.5, 4))
plt.subplot(1, 2, 1)
plt.scatter(X[labels == outer, 0], X[labels == outer, 1], color='navy',
marker='s', lw=0, label="outer labeled", s=10)
plt.scatter(X[labels == inner, 0], X[labels == inner, 1], color='c',
marker='s', lw=0, label='inner labeled', s=10)
plt.scatter(X[labels == -1, 0], X[labels == -1, 1], color='darkorange',
marker='.', label='unlabeled')
plt.legend(scatterpoints=1, shadow=False, loc='upper right')
plt.title("Raw data (2 classes=outer and inner)")
plt.subplot(1, 2, 2)
output_label_array = np.asarray(output_labels)
outer_numbers = np.where(output_label_array == outer)[0]
inner_numbers = np.where(output_label_array == inner)[0]
plt.scatter(X[outer_numbers, 0], X[outer_numbers, 1], color='navy',
marker='s', lw=0, s=10, label="outer learned")
plt.scatter(X[inner_numbers, 0], X[inner_numbers, 1], color='c',
marker='s', lw=0, s=10, label="inner learned")
plt.legend(scatterpoints=1, shadow=False, loc='upper right')
plt.title("Labels learned with Label Spreading (KNN)")
plt.subplots_adjust(left=0.07, bottom=0.07, right=0.93, top=0.92)
plt.show()
| bsd-3-clause | -7,969,875,748,526,497,000 | -4,372,188,647,032,442,000 | 38.241935 | 79 | 0.628442 | false |
lulandco/SickRage | lib/github/GistHistoryState.py | 74 | 10159 | # -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <[email protected]> #
# Copyright 2012 Zearin <[email protected]> #
# Copyright 2013 AKFish <[email protected]> #
# Copyright 2013 Vincent Jacques <[email protected]> #
# #
# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import github.GithubObject
import github.NamedUser
import github.CommitStats
import github.Gist
import github.GistFile
class GistHistoryState(github.GithubObject.CompletableGithubObject):
"""
This class represents GistHistoryStates as returned for example by http://developer.github.com/v3/todo
"""
@property
def change_status(self):
"""
:type: :class:`github.CommitStats.CommitStats`
"""
self._completeIfNotSet(self._change_status)
return self._change_status.value
@property
def comments(self):
"""
:type: integer
"""
self._completeIfNotSet(self._comments)
return self._comments.value
@property
def comments_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._comments_url)
return self._comments_url.value
@property
def commits_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._commits_url)
return self._commits_url.value
@property
def committed_at(self):
"""
:type: datetime.datetime
"""
self._completeIfNotSet(self._committed_at)
return self._committed_at.value
@property
def created_at(self):
"""
:type: datetime.datetime
"""
self._completeIfNotSet(self._created_at)
return self._created_at.value
@property
def description(self):
"""
:type: string
"""
self._completeIfNotSet(self._description)
return self._description.value
@property
def files(self):
"""
:type: dict of string to :class:`github.GistFile.GistFile`
"""
self._completeIfNotSet(self._files)
return self._files.value
@property
def forks(self):
"""
:type: list of :class:`github.Gist.Gist`
"""
self._completeIfNotSet(self._forks)
return self._forks.value
@property
def forks_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._forks_url)
return self._forks_url.value
@property
def git_pull_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._git_pull_url)
return self._git_pull_url.value
@property
def git_push_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._git_push_url)
return self._git_push_url.value
@property
def history(self):
"""
:type: list of :class:`GistHistoryState`
"""
self._completeIfNotSet(self._history)
return self._history.value
@property
def html_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._html_url)
return self._html_url.value
@property
def id(self):
"""
:type: string
"""
self._completeIfNotSet(self._id)
return self._id.value
@property
def owner(self):
"""
:type: :class:`github.NamedUser.NamedUser`
"""
self._completeIfNotSet(self._owner)
return self._owner.value
@property
def public(self):
"""
:type: bool
"""
self._completeIfNotSet(self._public)
return self._public.value
@property
def updated_at(self):
"""
:type: datetime.datetime
"""
self._completeIfNotSet(self._updated_at)
return self._updated_at.value
@property
def url(self):
"""
:type: string
"""
self._completeIfNotSet(self._url)
return self._url.value
@property
def user(self):
"""
:type: :class:`github.NamedUser.NamedUser`
"""
self._completeIfNotSet(self._user)
return self._user.value
@property
def version(self):
"""
:type: string
"""
self._completeIfNotSet(self._version)
return self._version.value
def _initAttributes(self):
self._change_status = github.GithubObject.NotSet
self._comments = github.GithubObject.NotSet
self._comments_url = github.GithubObject.NotSet
self._commits_url = github.GithubObject.NotSet
self._committed_at = github.GithubObject.NotSet
self._created_at = github.GithubObject.NotSet
self._description = github.GithubObject.NotSet
self._files = github.GithubObject.NotSet
self._forks = github.GithubObject.NotSet
self._forks_url = github.GithubObject.NotSet
self._git_pull_url = github.GithubObject.NotSet
self._git_push_url = github.GithubObject.NotSet
self._history = github.GithubObject.NotSet
self._html_url = github.GithubObject.NotSet
self._id = github.GithubObject.NotSet
self._owner = github.GithubObject.NotSet
self._public = github.GithubObject.NotSet
self._updated_at = github.GithubObject.NotSet
self._url = github.GithubObject.NotSet
self._user = github.GithubObject.NotSet
self._version = github.GithubObject.NotSet
def _useAttributes(self, attributes):
if "change_status" in attributes: # pragma no branch
self._change_status = self._makeClassAttribute(github.CommitStats.CommitStats, attributes["change_status"])
if "comments" in attributes: # pragma no branch
self._comments = self._makeIntAttribute(attributes["comments"])
if "comments_url" in attributes: # pragma no branch
self._comments_url = self._makeStringAttribute(attributes["comments_url"])
if "commits_url" in attributes: # pragma no branch
self._commits_url = self._makeStringAttribute(attributes["commits_url"])
if "committed_at" in attributes: # pragma no branch
self._committed_at = self._makeDatetimeAttribute(attributes["committed_at"])
if "created_at" in attributes: # pragma no branch
self._created_at = self._makeDatetimeAttribute(attributes["created_at"])
if "description" in attributes: # pragma no branch
self._description = self._makeStringAttribute(attributes["description"])
if "files" in attributes: # pragma no branch
self._files = self._makeDictOfStringsToClassesAttribute(github.GistFile.GistFile, attributes["files"])
if "forks" in attributes: # pragma no branch
self._forks = self._makeListOfClassesAttribute(github.Gist.Gist, attributes["forks"])
if "forks_url" in attributes: # pragma no branch
self._forks_url = self._makeStringAttribute(attributes["forks_url"])
if "git_pull_url" in attributes: # pragma no branch
self._git_pull_url = self._makeStringAttribute(attributes["git_pull_url"])
if "git_push_url" in attributes: # pragma no branch
self._git_push_url = self._makeStringAttribute(attributes["git_push_url"])
if "history" in attributes: # pragma no branch
self._history = self._makeListOfClassesAttribute(GistHistoryState, attributes["history"])
if "html_url" in attributes: # pragma no branch
self._html_url = self._makeStringAttribute(attributes["html_url"])
if "id" in attributes: # pragma no branch
self._id = self._makeStringAttribute(attributes["id"])
if "owner" in attributes: # pragma no branch
self._owner = self._makeClassAttribute(github.NamedUser.NamedUser, attributes["owner"])
if "public" in attributes: # pragma no branch
self._public = self._makeBoolAttribute(attributes["public"])
if "updated_at" in attributes: # pragma no branch
self._updated_at = self._makeDatetimeAttribute(attributes["updated_at"])
if "url" in attributes: # pragma no branch
self._url = self._makeStringAttribute(attributes["url"])
if "user" in attributes: # pragma no branch
self._user = self._makeClassAttribute(github.NamedUser.NamedUser, attributes["user"])
if "version" in attributes: # pragma no branch
self._version = self._makeStringAttribute(attributes["version"])
| gpl-3.0 | -8,232,970,040,198,310,000 | 2,159,567,908,982,582,300 | 36.349265 | 119 | 0.568757 | false |
1st1/uvloop | tests/test_base.py | 1 | 25924 | import asyncio
import fcntl
import logging
import os
import sys
import threading
import time
import uvloop
import unittest
import weakref
from unittest import mock
from uvloop._testbase import UVTestCase, AIOTestCase
class _TestBase:
def test_close(self):
self.assertFalse(self.loop._closed)
self.assertFalse(self.loop.is_closed())
self.loop.close()
self.assertTrue(self.loop._closed)
self.assertTrue(self.loop.is_closed())
# it should be possible to call close() more than once
self.loop.close()
self.loop.close()
# operation blocked when the loop is closed
f = asyncio.Future(loop=self.loop)
self.assertRaises(RuntimeError, self.loop.run_forever)
self.assertRaises(RuntimeError, self.loop.run_until_complete, f)
def test_handle_weakref(self):
wd = weakref.WeakValueDictionary()
h = self.loop.call_soon(lambda: None)
wd['h'] = h # Would fail without __weakref__ slot.
def test_call_soon_1(self):
calls = []
def cb(inc):
calls.append(inc)
self.loop.stop()
self.loop.call_soon(cb, 10)
h = self.loop.call_soon(cb, 100)
self.assertIn('.cb', repr(h))
h.cancel()
self.assertIn('cancelled', repr(h))
self.loop.call_soon(cb, 1)
self.loop.run_forever()
self.assertEqual(calls, [10, 1])
def test_call_soon_2(self):
waiter = self.loop.create_future()
waiter_r = weakref.ref(waiter)
self.loop.call_soon(lambda f: f.set_result(None), waiter)
self.loop.run_until_complete(waiter)
del waiter
self.assertIsNone(waiter_r())
def test_call_soon_3(self):
waiter = self.loop.create_future()
waiter_r = weakref.ref(waiter)
self.loop.call_soon(lambda f=waiter: f.set_result(None))
self.loop.run_until_complete(waiter)
del waiter
self.assertIsNone(waiter_r())
def test_call_soon_base_exc(self):
def cb():
raise KeyboardInterrupt()
self.loop.call_soon(cb)
with self.assertRaises(KeyboardInterrupt):
self.loop.run_forever()
self.assertFalse(self.loop.is_closed())
def test_calls_debug_reporting(self):
def run_test(debug, meth, stack_adj):
context = None
def handler(loop, ctx):
nonlocal context
context = ctx
self.loop.set_debug(debug)
self.loop.set_exception_handler(handler)
def cb():
1 / 0
meth(cb)
self.assertIsNone(context)
self.loop.run_until_complete(asyncio.sleep(0.05, loop=self.loop))
self.assertIs(type(context['exception']), ZeroDivisionError)
self.assertTrue(context['message'].startswith(
'Exception in callback'))
if debug:
tb = context['source_traceback']
self.assertEqual(tb[-1 + stack_adj].name, 'run_test')
else:
self.assertFalse('source_traceback' in context)
del context
for debug in (True, False):
for meth_name, meth, stack_adj in (
('call_soon',
self.loop.call_soon, 0),
('call_later', # `-1` accounts for lambda
lambda *args: self.loop.call_later(0.01, *args), -1)
):
with self.subTest(debug=debug, meth_name=meth_name):
run_test(debug, meth, stack_adj)
def test_now_update(self):
async def run():
st = self.loop.time()
time.sleep(0.05)
return self.loop.time() - st
delta = self.loop.run_until_complete(run())
self.assertTrue(delta > 0.049 and delta < 0.6)
def test_call_later_1(self):
calls = []
def cb(inc=10, stop=False):
calls.append(inc)
self.assertTrue(self.loop.is_running())
if stop:
self.loop.call_soon(self.loop.stop)
self.loop.call_later(0.05, cb)
# canceled right away
h = self.loop.call_later(0.05, cb, 100, True)
self.assertIn('.cb', repr(h))
h.cancel()
self.assertIn('cancelled', repr(h))
self.loop.call_later(0.05, cb, 1, True)
self.loop.call_later(1000, cb, 1000) # shouldn't be called
started = time.monotonic()
self.loop.run_forever()
finished = time.monotonic()
self.assertEqual(calls, [10, 1])
self.assertFalse(self.loop.is_running())
self.assertLess(finished - started, 0.1)
self.assertGreater(finished - started, 0.04)
def test_call_later_2(self):
# Test that loop.call_later triggers an update of
# libuv cached time.
async def main():
await asyncio.sleep(0.001, loop=self.loop)
time.sleep(0.01)
await asyncio.sleep(0.01, loop=self.loop)
started = time.monotonic()
self.loop.run_until_complete(main())
delta = time.monotonic() - started
self.assertGreater(delta, 0.019)
def test_call_later_3(self):
# a memory leak regression test
waiter = self.loop.create_future()
waiter_r = weakref.ref(waiter)
self.loop.call_later(0.01, lambda f: f.set_result(None), waiter)
self.loop.run_until_complete(waiter)
del waiter
self.assertIsNone(waiter_r())
def test_call_later_4(self):
# a memory leak regression test
waiter = self.loop.create_future()
waiter_r = weakref.ref(waiter)
self.loop.call_later(0.01, lambda f=waiter: f.set_result(None))
self.loop.run_until_complete(waiter)
del waiter
self.assertIsNone(waiter_r())
def test_call_later_negative(self):
calls = []
def cb(arg):
calls.append(arg)
self.loop.stop()
self.loop.call_later(-1, cb, 'a')
self.loop.run_forever()
self.assertEqual(calls, ['a'])
def test_call_later_rounding(self):
# Refs #233, call_later() and call_at() shouldn't call cb early
def cb():
self.loop.stop()
for i in range(8):
self.loop.call_later(0.06 + 0.01, cb) # 0.06999999999999999
started = int(round(self.loop.time() * 1000))
self.loop.run_forever()
finished = int(round(self.loop.time() * 1000))
self.assertGreaterEqual(finished - started, 69)
def test_call_at(self):
if os.environ.get('TRAVIS_OS_NAME'):
# Time seems to be really unpredictable on Travis.
raise unittest.SkipTest('time is not monotonic on Travis')
i = 0
def cb(inc):
nonlocal i
i += inc
self.loop.stop()
at = self.loop.time() + 0.05
self.loop.call_at(at, cb, 100).cancel()
self.loop.call_at(at, cb, 10)
started = time.monotonic()
self.loop.run_forever()
finished = time.monotonic()
self.assertEqual(i, 10)
self.assertLess(finished - started, 0.07)
self.assertGreater(finished - started, 0.045)
def test_check_thread(self):
def check_thread(loop, debug):
def cb():
pass
loop.set_debug(debug)
if debug:
msg = ("Non-thread-safe operation invoked on an "
"event loop other than the current one")
with self.assertRaisesRegex(RuntimeError, msg):
loop.call_soon(cb)
with self.assertRaisesRegex(RuntimeError, msg):
loop.call_later(60, cb)
with self.assertRaisesRegex(RuntimeError, msg):
loop.call_at(loop.time() + 60, cb)
else:
loop.call_soon(cb)
loop.call_later(60, cb)
loop.call_at(loop.time() + 60, cb)
def check_in_thread(loop, event, debug, create_loop, fut):
# wait until the event loop is running
event.wait()
try:
if create_loop:
loop2 = self.new_loop()
try:
asyncio.set_event_loop(loop2)
check_thread(loop, debug)
finally:
asyncio.set_event_loop(None)
loop2.close()
else:
check_thread(loop, debug)
except Exception as exc:
loop.call_soon_threadsafe(fut.set_exception, exc)
else:
loop.call_soon_threadsafe(fut.set_result, None)
def test_thread(loop, debug, create_loop=False):
event = threading.Event()
fut = asyncio.Future(loop=loop)
loop.call_soon(event.set)
args = (loop, event, debug, create_loop, fut)
thread = threading.Thread(target=check_in_thread, args=args)
thread.start()
loop.run_until_complete(fut)
thread.join()
# raise RuntimeError if the thread has no event loop
test_thread(self.loop, True)
# check disabled if debug mode is disabled
test_thread(self.loop, False)
# raise RuntimeError if the event loop of the thread is not the called
# event loop
test_thread(self.loop, True, create_loop=True)
# check disabled if debug mode is disabled
test_thread(self.loop, False, create_loop=True)
def test_run_once_in_executor_plain(self):
called = []
def cb(arg):
called.append(arg)
async def runner():
await self.loop.run_in_executor(None, cb, 'a')
self.loop.run_until_complete(runner())
self.assertEqual(called, ['a'])
def test_set_debug(self):
self.loop.set_debug(True)
self.assertTrue(self.loop.get_debug())
self.loop.set_debug(False)
self.assertFalse(self.loop.get_debug())
def test_run_until_complete_type_error(self):
self.assertRaises(
TypeError, self.loop.run_until_complete, 'blah')
def test_run_until_complete_loop(self):
task = asyncio.Future(loop=self.loop)
other_loop = self.new_loop()
self.addCleanup(other_loop.close)
self.assertRaises(
ValueError, other_loop.run_until_complete, task)
def test_run_until_complete_error(self):
async def foo():
raise ValueError('aaa')
with self.assertRaisesRegex(ValueError, 'aaa'):
self.loop.run_until_complete(foo())
def test_run_until_complete_loop_orphan_future_close_loop(self):
if self.implementation == 'asyncio' and sys.version_info < (3, 6, 2):
raise unittest.SkipTest('unfixed asyncio')
class ShowStopper(BaseException):
pass
async def foo(delay):
await asyncio.sleep(delay, loop=self.loop)
def throw():
raise ShowStopper
self.loop.call_soon(throw)
try:
self.loop.run_until_complete(foo(0.1))
except ShowStopper:
pass
# This call fails if run_until_complete does not clean up
# done-callback for the previous future.
self.loop.run_until_complete(foo(0.2))
def test_debug_slow_callbacks(self):
logger = logging.getLogger('asyncio')
self.loop.set_debug(True)
self.loop.slow_callback_duration = 0.2
self.loop.call_soon(lambda: time.sleep(0.3))
with mock.patch.object(logger, 'warning') as log:
self.loop.run_until_complete(asyncio.sleep(0, loop=self.loop))
self.assertEqual(log.call_count, 1)
# format message
msg = log.call_args[0][0] % log.call_args[0][1:]
self.assertIn('Executing <Handle', msg)
self.assertIn('test_debug_slow_callbacks', msg)
def test_debug_slow_timer_callbacks(self):
logger = logging.getLogger('asyncio')
self.loop.set_debug(True)
self.loop.slow_callback_duration = 0.2
self.loop.call_later(0.01, lambda: time.sleep(0.3))
with mock.patch.object(logger, 'warning') as log:
self.loop.run_until_complete(asyncio.sleep(0.02, loop=self.loop))
self.assertEqual(log.call_count, 1)
# format message
msg = log.call_args[0][0] % log.call_args[0][1:]
self.assertIn('Executing <TimerHandle', msg)
self.assertIn('test_debug_slow_timer_callbacks', msg)
def test_debug_slow_task_callbacks(self):
logger = logging.getLogger('asyncio')
self.loop.set_debug(True)
self.loop.slow_callback_duration = 0.2
async def foo():
time.sleep(0.3)
with mock.patch.object(logger, 'warning') as log:
self.loop.run_until_complete(foo())
self.assertEqual(log.call_count, 1)
# format message
msg = log.call_args[0][0] % log.call_args[0][1:]
self.assertIn('Executing <Task finished', msg)
self.assertIn('test_debug_slow_task_callbacks', msg)
def test_default_exc_handler_callback(self):
self.loop.set_exception_handler(None)
self.loop._process_events = mock.Mock()
def zero_error(fut):
fut.set_result(True)
1 / 0
logger = logging.getLogger('asyncio')
# Test call_soon (events.Handle)
with mock.patch.object(logger, 'error') as log:
fut = asyncio.Future(loop=self.loop)
self.loop.call_soon(zero_error, fut)
fut.add_done_callback(lambda fut: self.loop.stop())
self.loop.run_forever()
log.assert_called_with(
self.mock_pattern('Exception in callback.*zero'),
exc_info=mock.ANY)
# Test call_later (events.TimerHandle)
with mock.patch.object(logger, 'error') as log:
fut = asyncio.Future(loop=self.loop)
self.loop.call_later(0.01, zero_error, fut)
fut.add_done_callback(lambda fut: self.loop.stop())
self.loop.run_forever()
log.assert_called_with(
self.mock_pattern('Exception in callback.*zero'),
exc_info=mock.ANY)
def test_set_exc_handler_custom(self):
self.loop.set_exception_handler(None)
logger = logging.getLogger('asyncio')
def run_loop():
def zero_error():
self.loop.stop()
1 / 0
self.loop.call_soon(zero_error)
self.loop.run_forever()
errors = []
def handler(loop, exc):
errors.append(exc)
self.loop.set_debug(True)
if hasattr(self.loop, 'get_exception_handler'):
# Available since Python 3.5.2
self.assertIsNone(self.loop.get_exception_handler())
self.loop.set_exception_handler(handler)
if hasattr(self.loop, 'get_exception_handler'):
self.assertIs(self.loop.get_exception_handler(), handler)
run_loop()
self.assertEqual(len(errors), 1)
self.assertRegex(errors[-1]['message'],
'Exception in callback.*zero_error')
self.loop.set_exception_handler(None)
with mock.patch.object(logger, 'error') as log:
run_loop()
log.assert_called_with(
self.mock_pattern('Exception in callback.*zero'),
exc_info=mock.ANY)
self.assertEqual(len(errors), 1)
def test_set_exc_handler_broken(self):
logger = logging.getLogger('asyncio')
def run_loop():
def zero_error():
self.loop.stop()
1 / 0
self.loop.call_soon(zero_error)
self.loop.run_forever()
def handler(loop, context):
raise AttributeError('spam')
self.loop._process_events = mock.Mock()
self.loop.set_exception_handler(handler)
with mock.patch.object(logger, 'error') as log:
run_loop()
log.assert_called_with(
self.mock_pattern('Unhandled error in exception handler'),
exc_info=mock.ANY)
def test_set_task_factory_invalid(self):
with self.assertRaisesRegex(
TypeError,
'task factory must be a callable or None'):
self.loop.set_task_factory(1)
self.assertIsNone(self.loop.get_task_factory())
def test_set_task_factory(self):
self.loop._process_events = mock.Mock()
class MyTask(asyncio.Task):
pass
@asyncio.coroutine
def coro():
pass
factory = lambda loop, coro: MyTask(coro, loop=loop)
self.assertIsNone(self.loop.get_task_factory())
self.loop.set_task_factory(factory)
self.assertIs(self.loop.get_task_factory(), factory)
task = self.loop.create_task(coro())
self.assertTrue(isinstance(task, MyTask))
self.loop.run_until_complete(task)
self.loop.set_task_factory(None)
self.assertIsNone(self.loop.get_task_factory())
task = self.loop.create_task(coro())
self.assertTrue(isinstance(task, asyncio.Task))
self.assertFalse(isinstance(task, MyTask))
self.loop.run_until_complete(task)
def _compile_agen(self, src):
try:
g = {}
exec(src, globals(), g)
except SyntaxError:
# Python < 3.6
raise unittest.SkipTest()
else:
return g['waiter']
def test_shutdown_asyncgens_01(self):
finalized = list()
if not hasattr(self.loop, 'shutdown_asyncgens'):
raise unittest.SkipTest()
waiter = self._compile_agen(
'''async def waiter(timeout, finalized, loop):
try:
await asyncio.sleep(timeout, loop=loop)
yield 1
finally:
await asyncio.sleep(0, loop=loop)
finalized.append(1)
''')
async def wait():
async for _ in waiter(1, finalized, self.loop):
pass
t1 = self.loop.create_task(wait())
t2 = self.loop.create_task(wait())
self.loop.run_until_complete(asyncio.sleep(0.1, loop=self.loop))
self.loop.run_until_complete(self.loop.shutdown_asyncgens())
self.assertEqual(finalized, [1, 1])
# Silence warnings
t1.cancel()
t2.cancel()
self.loop.run_until_complete(asyncio.sleep(0.1, loop=self.loop))
def test_shutdown_asyncgens_02(self):
if not hasattr(self.loop, 'shutdown_asyncgens'):
raise unittest.SkipTest()
logged = 0
def logger(loop, context):
nonlocal logged
self.assertIn('asyncgen', context)
expected = 'an error occurred during closing of asynchronous'
if expected in context['message']:
logged += 1
waiter = self._compile_agen('''async def waiter(timeout, loop):
try:
await asyncio.sleep(timeout, loop=loop)
yield 1
finally:
1 / 0
''')
async def wait():
async for _ in waiter(1, self.loop):
pass
t = self.loop.create_task(wait())
self.loop.run_until_complete(asyncio.sleep(0.1, loop=self.loop))
self.loop.set_exception_handler(logger)
self.loop.run_until_complete(self.loop.shutdown_asyncgens())
self.assertEqual(logged, 1)
# Silence warnings
t.cancel()
self.loop.run_until_complete(asyncio.sleep(0.1, loop=self.loop))
def test_shutdown_asyncgens_03(self):
if not hasattr(self.loop, 'shutdown_asyncgens'):
raise unittest.SkipTest()
waiter = self._compile_agen('''async def waiter():
yield 1
yield 2
''')
async def foo():
# We specifically want to hit _asyncgen_finalizer_hook
# method.
await waiter().asend(None)
self.loop.run_until_complete(foo())
self.loop.run_until_complete(asyncio.sleep(0.01, loop=self.loop))
def test_inf_wait_for(self):
async def foo():
await asyncio.sleep(0.1, loop=self.loop)
return 123
res = self.loop.run_until_complete(
asyncio.wait_for(foo(), timeout=float('inf'), loop=self.loop))
self.assertEqual(res, 123)
class TestBaseUV(_TestBase, UVTestCase):
def test_loop_create_future(self):
fut = self.loop.create_future()
self.assertTrue(isinstance(fut, asyncio.Future))
self.assertIs(fut._loop, self.loop)
fut.cancel()
def test_loop_call_soon_handle_cancelled(self):
cb = lambda: False # NoQA
handle = self.loop.call_soon(cb)
self.assertFalse(handle.cancelled())
handle.cancel()
self.assertTrue(handle.cancelled())
handle = self.loop.call_soon(cb)
self.assertFalse(handle.cancelled())
self.run_loop_briefly()
self.assertFalse(handle.cancelled())
def test_loop_call_later_handle_cancelled(self):
cb = lambda: False # NoQA
handle = self.loop.call_later(0.01, cb)
self.assertFalse(handle.cancelled())
handle.cancel()
self.assertTrue(handle.cancelled())
handle = self.loop.call_later(0.01, cb)
self.assertFalse(handle.cancelled())
self.run_loop_briefly(delay=0.05)
self.assertFalse(handle.cancelled())
def test_loop_std_files_cloexec(self):
# See https://github.com/MagicStack/uvloop/issues/40 for details.
for fd in {0, 1, 2}:
flags = fcntl.fcntl(fd, fcntl.F_GETFD)
self.assertFalse(flags & fcntl.FD_CLOEXEC)
def test_default_exc_handler_broken(self):
logger = logging.getLogger('asyncio')
_context = None
class Loop(uvloop.Loop):
_selector = mock.Mock()
_process_events = mock.Mock()
def default_exception_handler(self, context):
nonlocal _context
_context = context
# Simulates custom buggy "default_exception_handler"
raise ValueError('spam')
loop = Loop()
self.addCleanup(loop.close)
self.addCleanup(lambda: asyncio.set_event_loop(None))
asyncio.set_event_loop(loop)
def run_loop():
def zero_error():
loop.stop()
1 / 0
loop.call_soon(zero_error)
loop.run_forever()
with mock.patch.object(logger, 'error') as log:
run_loop()
log.assert_called_with(
'Exception in default exception handler',
exc_info=True)
def custom_handler(loop, context):
raise ValueError('ham')
_context = None
loop.set_exception_handler(custom_handler)
with mock.patch.object(logger, 'error') as log:
run_loop()
log.assert_called_with(
self.mock_pattern('Exception in default exception.*'
'while handling.*in custom'),
exc_info=True)
# Check that original context was passed to default
# exception handler.
self.assertIn('context', _context)
self.assertIs(type(_context['context']['exception']),
ZeroDivisionError)
class TestBaseAIO(_TestBase, AIOTestCase):
pass
class TestPolicy(unittest.TestCase):
def test_uvloop_policy(self):
try:
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
loop = asyncio.new_event_loop()
try:
self.assertIsInstance(loop, uvloop.Loop)
finally:
loop.close()
finally:
asyncio.set_event_loop_policy(None)
@unittest.skipUnless(hasattr(asyncio, '_get_running_loop'),
'No asyncio._get_running_loop')
def test_running_loop_within_a_loop(self):
@asyncio.coroutine
def runner(loop):
loop.run_forever()
try:
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
loop = asyncio.new_event_loop()
outer_loop = asyncio.new_event_loop()
try:
with self.assertRaisesRegex(RuntimeError,
'while another loop is running'):
outer_loop.run_until_complete(runner(loop))
finally:
loop.close()
outer_loop.close()
finally:
asyncio.set_event_loop_policy(None)
@unittest.skipUnless(hasattr(asyncio, '_get_running_loop'),
'No asyncio._get_running_loop')
def test_get_event_loop_returns_running_loop(self):
class Policy(asyncio.DefaultEventLoopPolicy):
def get_event_loop(self):
raise NotImplementedError
loop = None
old_policy = asyncio.get_event_loop_policy()
try:
asyncio.set_event_loop_policy(Policy())
loop = uvloop.new_event_loop()
self.assertIs(asyncio._get_running_loop(), None)
async def func():
self.assertIs(asyncio.get_event_loop(), loop)
self.assertIs(asyncio._get_running_loop(), loop)
loop.run_until_complete(func())
finally:
asyncio.set_event_loop_policy(old_policy)
if loop is not None:
loop.close()
self.assertIs(asyncio._get_running_loop(), None)
| mit | 2,296,612,169,853,023,000 | -4,229,270,760,784,409,600 | 30.965475 | 78 | 0.56411 | false |
django-notifications/django-notifications | notifications/migrations/0001_initial.py | 1 | 2201 | # -*- coding: utf-8 -*-
from django.db import models, migrations
import django.utils.timezone
from django.conf import settings
import swapper
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('contenttypes', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Notification',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('level', models.CharField(default='info', max_length=20, choices=[('success', 'success'), ('info', 'info'), ('warning', 'warning'), ('error', 'error')])),
('unread', models.BooleanField(default=True)),
('actor_object_id', models.CharField(max_length=255)),
('verb', models.CharField(max_length=255)),
('description', models.TextField(null=True, blank=True)),
('target_object_id', models.CharField(max_length=255, null=True, blank=True)),
('action_object_object_id', models.CharField(max_length=255, null=True, blank=True)),
('timestamp', models.DateTimeField(default=django.utils.timezone.now)),
('public', models.BooleanField(default=True)),
('action_object_content_type', models.ForeignKey(related_name='notify_action_object', blank=True, to='contenttypes.ContentType', null=True, on_delete=models.CASCADE)),
('actor_content_type', models.ForeignKey(related_name='notify_actor', to='contenttypes.ContentType', on_delete=models.CASCADE)),
('recipient', models.ForeignKey(related_name='notifications', to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE)),
('target_content_type', models.ForeignKey(related_name='notify_target', blank=True, to='contenttypes.ContentType', null=True, on_delete=models.CASCADE)),
],
options={
'swappable': swapper.swappable_setting('notifications', 'Notification'),
'ordering': ('-timestamp',),
},
bases=(models.Model,),
),
]
| bsd-3-clause | 2,478,794,827,065,514,000 | 8,457,015,609,387,007,000 | 54.025 | 183 | 0.614266 | false |
catapult-project/catapult | third_party/gsutil/gslib/vendored/boto/boto/ec2/tag.py | 181 | 3076 | # Copyright (c) 2010 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010, Eucalyptus Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
class TagSet(dict):
"""
A TagSet is used to collect the tags associated with a particular
EC2 resource. Not all resources can be tagged but for those that
can, this dict object will be used to collect those values. See
:class:`boto.ec2.ec2object.TaggedEC2Object` for more details.
"""
def __init__(self, connection=None):
self.connection = connection
self._current_key = None
self._current_value = None
def startElement(self, name, attrs, connection):
if name == 'item':
self._current_key = None
self._current_value = None
return None
def endElement(self, name, value, connection):
if name == 'key':
self._current_key = value
elif name == 'value':
self._current_value = value
elif name == 'item':
self[self._current_key] = self._current_value
class Tag(object):
"""
A Tag is used when creating or listing all tags related to
an AWS account. It records not only the key and value but
also the ID of the resource to which the tag is attached
as well as the type of the resource.
"""
def __init__(self, connection=None, res_id=None, res_type=None,
name=None, value=None):
self.connection = connection
self.res_id = res_id
self.res_type = res_type
self.name = name
self.value = value
def __repr__(self):
return 'Tag:%s' % self.name
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'resourceId':
self.res_id = value
elif name == 'resourceType':
self.res_type = value
elif name == 'key':
self.name = value
elif name == 'value':
self.value = value
else:
setattr(self, name, value)
| bsd-3-clause | 330,997,034,158,811,100 | -1,900,873,084,006,728,400 | 35.619048 | 74 | 0.652146 | false |
gsalgado/pyethapp | pyethapp/accounts.py | 1 | 20516 | import json
import os
from random import SystemRandom
import shutil
from uuid import UUID
from devp2p.service import BaseService
from ethereum.tools import keys
from ethereum.slogging import get_logger
from ethereum.utils import privtopub # this is different than the one used in devp2p.crypto
from ethereum.utils import sha3, is_string, decode_hex, remove_0x_head
log = get_logger('accounts')
DEFAULT_COINBASE = 'de0b295669a9fd93d5f28d9ec85e40f4cb697bae'.decode('hex')
random = SystemRandom()
def mk_privkey(seed):
return sha3(seed)
def mk_random_privkey():
k = hex(random.getrandbits(256))[2:-1].zfill(64)
assert len(k) == 64
return k.decode('hex')
class Account(object):
"""Represents an account.
:ivar keystore: the key store as a dictionary (as decoded from json)
:ivar locked: `True` if the account is locked and neither private nor public keys can be
accessed, otherwise `False`
:ivar path: absolute path to the associated keystore file (`None` for in-memory accounts)
"""
def __init__(self, keystore, password=None, path=None):
self.keystore = keystore
try:
self._address = self.keystore['address'].decode('hex')
except KeyError:
self._address = None
self.locked = True
if password is not None:
self.unlock(password)
if path is not None:
self.path = os.path.abspath(path)
else:
self.path = None
@classmethod
def new(cls, password, key=None, uuid=None, path=None):
"""Create a new account.
Note that this creates the account in memory and does not store it on disk.
:param password: the password used to encrypt the private key
:param key: the private key, or `None` to generate a random one
        :param uuid: an optional id
        :param path: the absolute path of the associated keystore file, or
                     `None` for an in-memory account
        """
if key is None:
key = mk_random_privkey()
keystore = keys.make_keystore_json(key, password)
keystore['id'] = uuid
return Account(keystore, password, path)
@classmethod
def load(cls, path, password=None):
"""Load an account from a keystore file.
:param path: full path to the keyfile
:param password: the password to decrypt the key file or `None` to leave it encrypted
"""
with open(path) as f:
keystore = json.load(f)
if not keys.check_keystore_json(keystore):
raise ValueError('Invalid keystore file')
return Account(keystore, password, path=path)
def dump(self, include_address=True, include_id=True):
"""Dump the keystore for later disk storage.
        The result inherits the entries `'crypto'` and `'version'` from `account.keystore`, and
        adds `'address'` and `'id'` in accordance with the parameters `'include_address'` and
        `'include_id'`.
If address or id are not known, they are not added, even if requested.
:param include_address: flag denoting if the address should be included or not
:param include_id: flag denoting if the id should be included or not
"""
d = {}
d['crypto'] = self.keystore['crypto']
d['version'] = self.keystore['version']
if include_address and self.address is not None:
d['address'] = self.address.encode('hex')
if include_id and self.uuid is not None:
d['id'] = self.uuid
return json.dumps(d)
def unlock(self, password):
"""Unlock the account with a password.
If the account is already unlocked, nothing happens, even if the password is wrong.
:raises: :exc:`ValueError` (originating in ethereum.keys) if the password is wrong (and the
account is locked)
"""
if self.locked:
self._privkey = keys.decode_keystore_json(self.keystore, password)
self.locked = False
self.address # get address such that it stays accessible after a subsequent lock
def lock(self):
"""Relock an unlocked account.
This method sets `account.privkey` to `None` (unlike `account.address` which is preserved).
        After calling this method, both `account.privkey` and `account.pubkey` are `None`.
`account.address` stays unchanged, even if it has been derived from the private key.
"""
self._privkey = None
self.locked = True
@property
def privkey(self):
"""The account's private key or `None` if the account is locked"""
if not self.locked:
return self._privkey
else:
return None
@property
def pubkey(self):
"""The account's public key or `None` if the account is locked"""
if not self.locked:
return privtopub(self.privkey)
else:
return None
@property
def address(self):
"""The account's address or `None` if the address is not stored in the key file and cannot
be reconstructed (because the account is locked)
"""
if self._address:
pass
elif 'address' in self.keystore:
self._address = self.keystore['address'].decode('hex')
elif not self.locked:
self._address = keys.privtoaddr(self.privkey)
else:
return None
return self._address
@property
def uuid(self):
"""An optional unique identifier, formatted according to UUID version 4, or `None` if the
account does not have an id
"""
try:
return self.keystore['id']
except KeyError:
return None
@uuid.setter
def uuid(self, value):
"""Set the UUID. Set it to `None` in order to remove it."""
if value is not None:
self.keystore['id'] = value
elif 'id' in self.keystore:
self.keystore.pop('id')
def sign_tx(self, tx):
"""Sign a Transaction with the private key of this account.
If the account is unlocked, this is equivalent to ``tx.sign(account.privkey)``.
:param tx: the :class:`ethereum.transactions.Transaction` to sign
:raises: :exc:`ValueError` if the account is locked
"""
if self.privkey:
log.info('signing tx', tx=tx, account=self)
tx.sign(self.privkey)
else:
raise ValueError('Locked account cannot sign tx')
def __repr__(self):
if self.address is not None:
address = self.address.encode('hex')
else:
address = '?'
return '<Account(address={address}, id={id})>'.format(address=address, id=self.uuid)
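# Illustrative sketch (not part of the original module): typical use of the
# Account class defined above. The password and path below are hypothetical
# placeholder values.
def _example_account_usage():  # pragma: no cover - documentation sketch only
    account = Account.new('secret password', path='/tmp/keystore/example')
    assert not account.locked          # Account.new leaves the account unlocked
    address = account.address          # derived from the freshly generated key
    account.lock()                     # privkey and pubkey become None, address stays
    account.unlock('secret password')  # a wrong password would raise ValueError
    return address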
class AccountsService(BaseService):
"""Service that manages accounts.
At initialization, this service collects the accounts stored as key files in the keystore
directory (config option `accounts.keystore_dir`) and below.
To add more accounts, use :method:`add_account`.
:ivar accounts: the :class:`Account`s managed by this service, sorted by the paths to their
keystore files
:ivar keystore_dir: absolute path to the keystore directory
"""
name = 'accounts'
default_config = dict(accounts=dict(keystore_dir='keystore', must_include_coinbase=True))
def __init__(self, app):
super(AccountsService, self).__init__(app)
self.keystore_dir = app.config['accounts']['keystore_dir']
if not os.path.isabs(self.keystore_dir):
self.keystore_dir = os.path.abspath(os.path.join(app.config['data_dir'],
self.keystore_dir))
assert os.path.isabs(self.keystore_dir)
self.accounts = []
if not os.path.exists(self.keystore_dir):
log.warning('keystore directory does not exist', directory=self.keystore_dir)
elif not os.path.isdir(self.keystore_dir):
log.error('configured keystore directory is a file, not a directory',
directory=self.keystore_dir)
else:
# traverse file tree rooted at keystore_dir
log.info('searching for key files', directory=self.keystore_dir)
for dirpath, _, filenames in os.walk(self.keystore_dir):
for filename in [os.path.join(dirpath, filename) for filename in filenames]:
try:
self.accounts.append(Account.load(filename))
except ValueError:
log.warning('invalid file skipped in keystore directory',
path=filename)
self.accounts.sort(key=lambda account: account.path) # sort accounts by path
if not self.accounts:
log.warn('no accounts found')
else:
log.info('found account(s)', accounts=self.accounts)
@property
def coinbase(self):
"""Return the address that should be used as coinbase for new blocks.
The coinbase address is given by the config field pow.coinbase_hex. If this does not exist
or is `None`, the address of the first account is used instead. If there are no accounts,
the coinbase is `DEFAULT_COINBASE`.
        :raises: :exc:`ValueError` if the coinbase is invalid (not a string, wrong length) or there is
                 no account for it and the config flag `accounts.must_include_coinbase` is set (does not
apply to the default coinbase)
"""
cb_hex = self.app.config.get('pow', {}).get('coinbase_hex')
if cb_hex is None:
if not self.accounts_with_address:
return DEFAULT_COINBASE
cb = self.accounts_with_address[0].address
else:
if not is_string(cb_hex):
raise ValueError('coinbase must be string')
try:
cb = decode_hex(remove_0x_head(cb_hex))
except (ValueError, TypeError):
raise ValueError('invalid coinbase')
if len(cb) != 20:
raise ValueError('wrong coinbase length')
if self.config['accounts']['must_include_coinbase']:
if cb not in (acct.address for acct in self.accounts):
raise ValueError('no account for coinbase')
return cb
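    # Illustrative note (not part of the original file): the coinbase is usually
    # configured through the application config, e.g.
    #   app.config['pow']['coinbase_hex'] = '0x' + 20 * '00'
    # With accounts.must_include_coinbase enabled (the default), that address
    # must belong to one of the managed accounts, otherwise a ValueError is raised.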
def add_account(self, account, store=True, include_address=True, include_id=True):
"""Add an account.
If `store` is true the account will be stored as a key file at the location given by
`account.path`. If this is `None` a :exc:`ValueError` is raised. `include_address` and
`include_id` determine if address and id should be removed for storage or not.
This method will raise a :exc:`ValueError` if the new account has the same UUID as an
account already known to the service. Note that address collisions do not result in an
exception as those may slip through anyway for locked accounts with hidden addresses.
"""
log.info('adding account', account=account)
if account.uuid is not None:
if len([acct for acct in self.accounts if acct.uuid == account.uuid]) > 0:
log.error('could not add account (UUID collision)', uuid=account.uuid)
raise ValueError('Could not add account (UUID collision)')
if store:
if account.path is None:
raise ValueError('Cannot store account without path')
assert os.path.isabs(account.path), account.path
if os.path.exists(account.path):
                log.error('File already exists', path=account.path)
                raise IOError('File already exists')
assert account.path not in [acct.path for acct in self.accounts]
try:
directory = os.path.dirname(account.path)
if not os.path.exists(directory):
os.makedirs(directory)
with open(account.path, 'w') as f:
f.write(account.dump(include_address, include_id))
except IOError as e:
log.error('Could not write to file', path=account.path, message=e.strerror,
errno=e.errno)
raise
self.accounts.append(account)
self.accounts.sort(key=lambda account: account.path)
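    # Illustrative sketch (not part of the original file): adding a freshly
    # generated, password-protected account to a running service; the password
    # is a hypothetical placeholder.
    #   account = Account.new('secret password')
    #   account.path = service.propose_path(account.address)
    #   service.add_account(account, store=True)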
def update_account(self, account, new_password, include_address=True, include_id=True):
"""Replace the password of an account.
The update is carried out in three steps:
1) the old keystore file is renamed
2) the new keystore file is created at the previous location of the old keystore file
3) the old keystore file is removed
In this way, at least one of the keystore files exists on disk at any time and can be
recovered if the process is interrupted.
:param account: the :class:`Account` which must be unlocked, stored on disk and included in
:attr:`AccountsService.accounts`.
:param include_address: forwarded to :meth:`add_account` during step 2
:param include_id: forwarded to :meth:`add_account` during step 2
:raises: :exc:`ValueError` if the account is locked, if it is not added to the account
manager, or if it is not stored
"""
if account not in self.accounts:
raise ValueError('Account not managed by account service')
if account.locked:
raise ValueError('Cannot update locked account')
if account.path is None:
raise ValueError('Account not stored on disk')
assert os.path.isabs(account.path)
# create new account
log.debug('creating new account')
new_account = Account.new(new_password, key=account.privkey, uuid=account.uuid)
new_account.path = account.path
# generate unique path and move old keystore file there
backup_path = account.path + '~'
i = 1
while os.path.exists(backup_path):
backup_path = backup_path[:backup_path.rfind('~') + 1] + str(i)
i += 1
assert not os.path.exists(backup_path)
log.info('moving old keystore file to backup location', **{'from': account.path,
'to': backup_path})
try:
shutil.move(account.path, backup_path)
except:
log.error('could not backup keystore, stopping account update',
**{'from': account.path, 'to': backup_path})
raise
assert os.path.exists(backup_path)
assert not os.path.exists(new_account.path)
account.path = backup_path
# remove old account from manager (not from disk yet) and add new account
self.accounts.remove(account)
assert account not in self.accounts
try:
self.add_account(new_account, include_address, include_id)
except:
log.error('adding new account failed, recovering from backup')
shutil.move(backup_path, new_account.path)
self.accounts.append(account)
self.accounts.sort(key=lambda account: account.path)
raise
assert os.path.exists(new_account.path)
assert new_account in self.accounts
# everything was successful (we are still here), so delete old keystore file
log.info('deleting backup of old keystore', path=backup_path)
try:
os.remove(backup_path)
except:
log.error('failed to delete no longer needed backup of old keystore',
path=account.path)
raise
# set members of account to values of new_account
account.keystore = new_account.keystore
account.path = new_account.path
assert account.__dict__ == new_account.__dict__
# replace new_account by old account in account list
self.accounts.append(account)
self.accounts.remove(new_account)
self.accounts.sort(key=lambda account: account.path)
log.debug('account update successful')
@property
def accounts_with_address(self):
"""Return a list of accounts whose address is known."""
return [account for account in self if account.address]
@property
def unlocked_accounts(self):
"""Return a list of all unlocked accounts."""
return [account for account in self if not account.locked]
def find(self, identifier):
"""Find an account by either its address, its id or its index as string.
Example identifiers:
- '9c0e0240776cfbe6fa1eb37e57721e1a88a563d1' (address)
- '0x9c0e0240776cfbe6fa1eb37e57721e1a88a563d1' (address with 0x prefix)
- '01dd527b-f4a5-4b3c-9abb-6a8e7cd6722f' (UUID)
- '3' (index)
:param identifier: the accounts hex encoded, case insensitive address (with optional 0x
prefix), its UUID or its index (as string, >= 1) in
`account_service.accounts`
:raises: :exc:`ValueError` if the identifier could not be interpreted
:raises: :exc:`KeyError` if the identified account is not known to the account_service
"""
try:
uuid = UUID(identifier)
except ValueError:
pass
else:
return self.get_by_id(str(uuid))
try:
index = int(identifier, 10)
except ValueError:
pass
else:
if index <= 0:
raise ValueError('Index must be 1 or greater')
try:
return self.accounts[index - 1]
except IndexError as e:
raise KeyError(e.message)
if identifier[:2] == '0x':
identifier = identifier[2:]
try:
address = identifier.decode('hex')
except TypeError:
success = False
else:
if len(address) != 20:
success = False
else:
return self[address]
assert not success
raise ValueError('Could not interpret account identifier')
def get_by_id(self, id):
"""Return the account with a given id.
Note that accounts are not required to have an id.
:raises: `KeyError` if no matching account can be found
"""
accts = [acct for acct in self.accounts if UUID(acct.uuid) == UUID(id)]
assert len(accts) <= 1
if len(accts) == 0:
raise KeyError('account with id {} unknown'.format(id))
elif len(accts) > 1:
log.warning('multiple accounts with same UUID found', uuid=id)
return accts[0]
def get_by_address(self, address):
"""Get an account by its address.
Note that even if an account with the given address exists, it might not be found if it is
locked. Also, multiple accounts with the same address may exist, in which case the first
one is returned (and a warning is logged).
:raises: `KeyError` if no matching account can be found
"""
assert len(address) == 20
accounts = [account for account in self.accounts if account.address == address]
if len(accounts) == 0:
            raise KeyError('account not found by address: ' + address.encode('hex'))
elif len(accounts) > 1:
log.warning('multiple accounts with same address found', address=address.encode('hex'))
return accounts[0]
def sign_tx(self, address, tx):
self.get_by_address(address).sign_tx(tx)
def propose_path(self, address):
return os.path.join(self.keystore_dir, address.encode('hex'))
def __contains__(self, address):
assert len(address) == 20
return address in [a.address for a in self.accounts]
def __getitem__(self, address_or_idx):
if isinstance(address_or_idx, bytes):
address = address_or_idx
assert len(address) == 20
for a in self.accounts:
if a.address == address:
return a
raise KeyError
else:
assert isinstance(address_or_idx, int)
return self.accounts[address_or_idx]
def __iter__(self):
return iter(self.accounts)
def __len__(self):
return len(self.accounts)
"""
--import-key = key.json
--unlock <password dialog>
--password passwordfile
--newkey <password dialog>
"""
| mit | -3,795,094,643,868,558,300 | 7,397,270,734,502,119,000 | 38.152672 | 99 | 0.604748 | false |
imsparsh/python-for-android | python3-alpha/extra_modules/pyxmpp2/ext/component.py | 46 | 7090 | #
# (C) Copyright 2003-2010 Jacek Konieczny <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License Version
# 2.1 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
"""Component (jabber:component:accept) stream handling.
Normative reference:
- `JEP 114 <http://www.jabber.org/jeps/jep-0114.html>`__
"""
raise ImportError("{0} is not yet rewritten for PyXMPP2".format(__name__))
__docformat__="restructuredtext en"
import hashlib
import logging
from ..stream import Stream
from ..streambase import stanza_factory,HostMismatch
from ..xmlextra import common_doc,common_root
from ..utils import to_utf8
from ..exceptions import StreamError,FatalStreamError,ComponentStreamError,FatalComponentStreamError
class ComponentStream(Stream):
"""Handles jabberd component (jabber:component:accept) connection stream.
:Ivariables:
- `server`: server to use.
- `port`: port number to use.
- `secret`: authentication secret.
:Types:
- `server`: `str`
- `port`: `int`
- `secret`: `str`"""
def __init__(self, jid, secret, server, port, keepalive = 0, owner = None):
"""Initialize a `ComponentStream` object.
:Parameters:
- `jid`: JID of the component.
- `secret`: authentication secret.
- `server`: server address.
- `port`: TCP port number on the server.
- `keepalive`: keepalive interval. 0 to disable.
- `owner`: `Client`, `Component` or similar object "owning" this stream.
"""
Stream.__init__(self, "jabber:component:accept",
sasl_mechanisms = [],
tls_settings = None,
keepalive = keepalive,
owner = owner)
self.server=server
self.port=port
self.me=jid
self.secret=secret
self.process_all_stanzas=1
self.__logger=logging.getLogger("pyxmpp2.jabberd.ComponentStream")
def _reset(self):
"""Reset `ComponentStream` object state, making the object ready to
handle new connections."""
Stream._reset(self)
def connect(self,server=None,port=None):
"""Establish a client connection to a server.
[component only]
:Parameters:
- `server`: name or address of the server to use. If not given
then use the one specified when creating the object.
- `port`: port number of the server to use. If not given then use
the one specified when creating the object.
:Types:
- `server`: `str`
- `port`: `int`"""
self.lock.acquire()
try:
self._connect(server,port)
finally:
self.lock.release()
def _connect(self,server=None,port=None):
"""Same as `ComponentStream.connect` but assume `self.lock` is acquired."""
if self.me.node or self.me.resource:
            raise ValueError("Component JID may have only domain defined")
if not server:
server=self.server
if not port:
port=self.port
if not server or not port:
raise ValueError("Server or port not given")
Stream._connect(self,server,port,None,self.me)
def accept(self,sock):
"""Accept an incoming component connection.
[server only]
:Parameters:
- `sock`: a listening socket."""
Stream.accept(self,sock,None)
def stream_start(self,doc):
"""Process <stream:stream> (stream start) tag received from peer.
Call `Stream.stream_start`, but ignore any `HostMismatch` error.
:Parameters:
- `doc`: document created by the parser"""
try:
Stream.stream_start(self,doc)
except HostMismatch:
pass
def _post_connect(self):
"""Initialize authentication when the connection is established
and we are the initiator."""
if self.initiator:
self._auth()
def _compute_handshake(self):
"""Compute the authentication handshake value.
:return: the computed hash value.
:returntype: `str`"""
return hashlib.sha1(to_utf8(self.stream_id)+to_utf8(self.secret)).hexdigest()
def _auth(self):
"""Authenticate on the server.
[component only]"""
if self.authenticated:
self.__logger.debug("_auth: already authenticated")
return
self.__logger.debug("doing handshake...")
hash_value=self._compute_handshake()
n=common_root.newTextChild(None,"handshake",hash_value)
self._write_node(n)
n.unlinkNode()
n.freeNode()
self.__logger.debug("handshake hash sent.")
def _process_node(self,node):
"""Process first level element of the stream.
Handle component handshake (authentication) element, and
treat elements in "jabber:component:accept", "jabber:client"
and "jabber:server" equally (pass to `self.process_stanza`).
All other elements are passed to `Stream._process_node`.
:Parameters:
- `node`: XML node describing the element
"""
ns=node.ns()
if ns:
ns_uri=node.ns().getContent()
if (not ns or ns_uri=="jabber:component:accept") and node.name=="handshake":
if self.initiator and not self.authenticated:
self.authenticated=1
self.state_change("authenticated",self.me)
self._post_auth()
return
elif not self.authenticated and node.getContent()==self._compute_handshake():
self.peer=self.me
n=common_doc.newChild(None,"handshake",None)
self._write_node(n)
n.unlinkNode()
n.freeNode()
self.peer_authenticated=1
self.state_change("authenticated",self.peer)
self._post_auth()
return
else:
self._send_stream_error("not-authorized")
raise FatalComponentStreamError("Hanshake error.")
if ns_uri in ("jabber:component:accept","jabber:client","jabber:server"):
stanza=stanza_factory(node)
self.lock.release()
try:
self.process_stanza(stanza)
finally:
self.lock.acquire()
stanza.free()
return
return Stream._process_node(self,node)
# vi: sts=4 et sw=4
| apache-2.0 | 6,937,081,575,419,383,000 | 1,580,700,429,841,705,200 | 33.585366 | 100 | 0.599154 | false |
lgp171188/fjord | vendor/packages/urllib3/dummyserver/handlers.py | 10 | 7113 | from __future__ import print_function
import gzip
import json
import logging
import sys
import time
import zlib
from io import BytesIO
from tornado.wsgi import HTTPRequest
try:
from urllib.parse import urlsplit
except ImportError:
from urlparse import urlsplit
log = logging.getLogger(__name__)
class Response(object):
def __init__(self, body='', status='200 OK', headers=None):
if not isinstance(body, bytes):
body = body.encode('utf8')
self.body = body
self.status = status
self.headers = headers or [("Content-type", "text/plain")]
def __call__(self, environ, start_response):
start_response(self.status, self.headers)
return [self.body]
class WSGIHandler(object):
pass
class TestingApp(WSGIHandler):
"""
Simple app that performs various operations, useful for testing an HTTP
library.
    Given any path, it will attempt to load a corresponding
local method if it exists. Status code 200 indicates success, 400 indicates
failure. Each method has its own conditions for success/failure.
"""
def __call__(self, environ, start_response):
req = HTTPRequest(environ)
req.params = {}
for k, v in req.arguments.items():
req.params[k] = next(iter(v))
path = req.path[:]
if not path.startswith('/'):
path = urlsplit(path).path
target = path[1:].replace('/', '_')
method = getattr(self, target, self.index)
resp = method(req)
if dict(resp.headers).get('Connection') == 'close':
# FIXME: Can we kill the connection somehow?
pass
return resp(environ, start_response)
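    # Illustrative note (not part of the original file): a request for
    # "/specific_method?method=GET" is routed to specific_method() below, while
    # an unknown path such as "/no_such_handler" falls back to index().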
def index(self, _request):
"Render simple message"
return Response("Dummy server!")
def set_up(self, request):
test_type = request.params.get('test_type')
test_id = request.params.get('test_id')
if test_id:
print('\nNew test %s: %s' % (test_type, test_id))
else:
print('\nNew test %s' % test_type)
return Response("Dummy server is ready!")
def specific_method(self, request):
"Confirm that the request matches the desired method type"
method = request.params.get('method')
if method and not isinstance(method, str):
method = method.decode('utf8')
if request.method != method:
return Response("Wrong method: %s != %s" %
(method, request.method), status='400 Bad Request')
return Response()
def upload(self, request):
"Confirm that the uploaded file conforms to specification"
# FIXME: This is a huge broken mess
param = request.params.get('upload_param', 'myfile').decode('ascii')
filename = request.params.get('upload_filename', '').decode('utf-8')
size = int(request.params.get('upload_size', '0'))
files_ = request.files.get(param)
if len(files_) != 1:
            return Response("Expected 1 file for '%s', not %d" % (param, len(files_)),
status='400 Bad Request')
file_ = files_[0]
data = file_['body']
if int(size) != len(data):
return Response("Wrong size: %d != %d" %
(size, len(data)), status='400 Bad Request')
if filename != file_['filename']:
return Response("Wrong filename: %s != %s" %
                            (filename, file_['filename']),
status='400 Bad Request')
return Response()
def redirect(self, request):
"Perform a redirect to ``target``"
target = request.params.get('target', '/')
headers = [('Location', target)]
return Response(status='303 See Other', headers=headers)
def keepalive(self, request):
if request.params.get('close', b'0') == b'1':
headers = [('Connection', 'close')]
return Response('Closing', headers=headers)
headers = [('Connection', 'keep-alive')]
return Response('Keeping alive', headers=headers)
def sleep(self, request):
"Sleep for a specified amount of ``seconds``"
seconds = float(request.params.get('seconds', '1'))
time.sleep(seconds)
return Response()
def echo(self, request):
"Echo back the params"
if request.method == 'GET':
return Response(request.query)
return Response(request.body)
def encodingrequest(self, request):
"Check for UA accepting gzip/deflate encoding"
data = b"hello, world!"
encoding = request.headers.get('Accept-Encoding', '')
headers = None
if encoding == 'gzip':
headers = [('Content-Encoding', 'gzip')]
file_ = BytesIO()
zipfile = gzip.GzipFile('', mode='w', fileobj=file_)
zipfile.write(data)
zipfile.close()
data = file_.getvalue()
elif encoding == 'deflate':
headers = [('Content-Encoding', 'deflate')]
data = zlib.compress(data)
elif encoding == 'garbage-gzip':
headers = [('Content-Encoding', 'gzip')]
data = 'garbage'
elif encoding == 'garbage-deflate':
headers = [('Content-Encoding', 'deflate')]
data = 'garbage'
return Response(data, headers=headers)
def headers(self, request):
return Response(json.dumps(request.headers))
def shutdown(self, request):
sys.exit()
# RFC2231-aware replacement of internal tornado function
def _parse_header(line):
r"""Parse a Content-type like header.
Return the main content-type and a dictionary of options.
>>> d = _parse_header("CD: fd; foo=\"bar\"; file*=utf-8''T%C3%A4st")[1]
>>> d['file'] == 'T\u00e4st'
True
>>> d['foo']
'bar'
"""
import tornado.httputil
import email.utils
from urllib3.packages import six
if not six.PY3:
line = line.encode('utf-8')
parts = tornado.httputil._parseparam(';' + line)
key = next(parts)
# decode_params treats first argument special, but we already stripped key
params = [('Dummy', 'value')]
for p in parts:
i = p.find('=')
if i >= 0:
name = p[:i].strip().lower()
value = p[i + 1:].strip()
params.append((name, value))
params = email.utils.decode_params(params)
params.pop(0) # get rid of the dummy again
pdict = {}
for name, value in params:
print(repr(value))
value = email.utils.collapse_rfc2231_value(value)
if len(value) >= 2 and value[0] == '"' and value[-1] == '"':
value = value[1:-1]
pdict[name] = value
return key, pdict
# TODO: make the following conditional as soon as we know a version
# which does not require this fix.
# See https://github.com/facebook/tornado/issues/868
if True:
import tornado.httputil
tornado.httputil._parse_header = _parse_header
| bsd-3-clause | -6,391,620,145,941,117,000 | -5,938,686,539,432,547,000 | 31.62844 | 85 | 0.578799 | false |
tsufiev/horizon | openstack_dashboard/dashboards/admin/volumes/snapshots/views.py | 3 | 2439 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon.utils import memoized
from openstack_dashboard.api import cinder
from openstack_dashboard.dashboards.admin.volumes.snapshots \
import forms as vol_snapshot_forms
from openstack_dashboard.dashboards.admin.volumes.snapshots \
import tabs as vol_snapshot_tabs
from openstack_dashboard.dashboards.project.volumes.snapshots \
import views
class UpdateStatusView(forms.ModalFormView):
form_class = vol_snapshot_forms.UpdateStatus
template_name = 'admin/volumes/snapshots/update_status.html'
success_url = reverse_lazy("horizon:admin:volumes:snapshots_tab")
page_title = _("Update Volume Snapshot Status")
@memoized.memoized_method
def get_object(self):
snap_id = self.kwargs['snapshot_id']
try:
self._object = cinder.volume_snapshot_get(self.request,
snap_id)
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve volume snapshot.'),
redirect=self.success_url)
return self._object
def get_context_data(self, **kwargs):
context = super(UpdateStatusView, self).get_context_data(**kwargs)
context['snapshot_id'] = self.kwargs["snapshot_id"]
return context
def get_initial(self):
snapshot = self.get_object()
return {'snapshot_id': self.kwargs["snapshot_id"],
'status': snapshot.status}
class DetailView(views.DetailView):
tab_group_class = vol_snapshot_tabs.SnapshotDetailsTabs
@staticmethod
def get_redirect_url():
return reverse('horizon:admin:volumes:index')
| apache-2.0 | 2,677,467,909,405,860,000 | 7,867,192,688,650,373,000 | 36.523077 | 75 | 0.694137 | false |
SchrodingersGat/InvenTree | InvenTree/report/serializers.py | 2 | 2030 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from InvenTree.serializers import InvenTreeModelSerializer
from InvenTree.serializers import InvenTreeAttachmentSerializerField
from .models import TestReport
from .models import BuildReport
from .models import BillOfMaterialsReport
from .models import PurchaseOrderReport, SalesOrderReport
class TestReportSerializer(InvenTreeModelSerializer):
template = InvenTreeAttachmentSerializerField(required=True)
class Meta:
model = TestReport
fields = [
'pk',
'name',
'description',
'template',
'filters',
'enabled',
]
class BuildReportSerializer(InvenTreeModelSerializer):
template = InvenTreeAttachmentSerializerField(required=True)
class Meta:
model = BuildReport
fields = [
'pk',
'name',
'description',
'template',
'filters',
'enabled',
]
class BOMReportSerializer(InvenTreeModelSerializer):
template = InvenTreeAttachmentSerializerField(required=True)
class Meta:
model = BillOfMaterialsReport
fields = [
'pk',
'name',
'description',
'template',
'filters',
'enabled',
]
class POReportSerializer(InvenTreeModelSerializer):
template = InvenTreeAttachmentSerializerField(required=True)
class Meta:
model = PurchaseOrderReport
fields = [
'pk',
'name',
'description',
'template',
'filters',
'enabled',
]
class SOReportSerializer(InvenTreeModelSerializer):
template = InvenTreeAttachmentSerializerField(required=True)
class Meta:
model = SalesOrderReport
fields = [
'pk',
'name',
'description',
'template',
'filters',
'enabled',
]
| mit | -7,084,129,858,369,560,000 | -716,039,872,302,816,100 | 21.555556 | 68 | 0.582759 | false |
louietsai/python-for-android | python-modules/twisted/twisted/spread/flavors.py | 56 | 21733 | # -*- test-case-name: twisted.test.test_pb -*-
# Copyright (c) 2001-2008 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
This module represents flavors of remotely acessible objects.
Currently this is only objects accessible through Perspective Broker, but will
hopefully encompass all forms of remote access which can emulate subsets of PB
(such as XMLRPC or SOAP).
Future Plans: Optimization. Exploitation of new-style object model.
Optimizations to this module should not affect external-use semantics at all,
but may have a small impact on users who subclass and override methods.
@author: Glyph Lefkowitz
"""
# NOTE: this module should NOT import pb; it is supposed to be a module which
# abstractly defines remotely accessible types. Many of these types expect to
# be serialized by Jelly, but they ought to be accessible through other
# mechanisms (like XMLRPC)
# system imports
import sys
from zope.interface import implements, Interface
# twisted imports
from twisted.python import log, reflect
# sibling imports
from jelly import setUnjellyableForClass, setUnjellyableForClassTree, setUnjellyableFactoryForClass, unjellyableRegistry
from jelly import Jellyable, Unjellyable, _Dummy, _DummyNewStyle
from jelly import setInstanceState, getInstanceState
# compatibility
setCopierForClass = setUnjellyableForClass
setCopierForClassTree = setUnjellyableForClassTree
setFactoryForClass = setUnjellyableFactoryForClass
copyTags = unjellyableRegistry
copy_atom = "copy"
cache_atom = "cache"
cached_atom = "cached"
remote_atom = "remote"
class NoSuchMethod(AttributeError):
"""Raised if there is no such remote method"""
class IPBRoot(Interface):
"""Factory for root Referenceable objects for PB servers."""
def rootObject(broker):
"""Return root Referenceable for broker."""
class Serializable(Jellyable):
"""An object that can be passed remotely.
I am a style of object which can be serialized by Perspective
Broker. Objects which wish to be referenceable or copied remotely
have to subclass Serializable. However, clients of Perspective
Broker will probably not want to directly subclass Serializable; the
Flavors of transferable objects are listed below.
What it means to be \"Serializable\" is that an object can be
passed to or returned from a remote method. Certain basic types
(dictionaries, lists, tuples, numbers, strings) are serializable by
default; however, classes need to choose a specific serialization
style: L{Referenceable}, L{Viewable}, L{Copyable} or L{Cacheable}.
You may also pass C{[lists, dictionaries, tuples]} of L{Serializable}
instances to or return them from remote methods, as many levels deep
as you like.
"""
def processUniqueID(self):
"""Return an ID which uniquely represents this object for this process.
By default, this uses the 'id' builtin, but can be overridden to
indicate that two values are identity-equivalent (such as proxies
for the same object).
"""
return id(self)
class Referenceable(Serializable):
perspective = None
"""I am an object sent remotely as a direct reference.
When one of my subclasses is sent as an argument to or returned
from a remote method call, I will be serialized by default as a
direct reference.
This means that the peer will be able to call methods on me;
a method call xxx() from my peer will be resolved to methods
of the name remote_xxx.
"""
def remoteMessageReceived(self, broker, message, args, kw):
"""A remote message has been received. Dispatch it appropriately.
The default implementation is to dispatch to a method called
'remote_messagename' and call it with the same arguments.
"""
args = broker.unserialize(args)
kw = broker.unserialize(kw)
method = getattr(self, "remote_%s" % message, None)
if method is None:
raise NoSuchMethod("No such method: remote_%s" % (message,))
try:
state = method(*args, **kw)
except TypeError:
log.msg("%s didn't accept %s and %s" % (method, args, kw))
raise
return broker.serialize(state, self.perspective)
def jellyFor(self, jellier):
"""(internal)
Return a tuple which will be used as the s-expression to
serialize this to a peer.
"""
return "remote", jellier.invoker.registerReference(self)
class Root(Referenceable):
"""I provide a root object to L{pb.Broker}s for a L{pb.BrokerFactory}.
When a L{pb.BrokerFactory} produces a L{pb.Broker}, it supplies that
L{pb.Broker} with an object named \"root\". That object is obtained
by calling my rootObject method.
"""
implements(IPBRoot)
def rootObject(self, broker):
"""A L{pb.BrokerFactory} is requesting to publish me as a root object.
When a L{pb.BrokerFactory} is sending me as the root object, this
method will be invoked to allow per-broker versions of an
object. By default I return myself.
"""
return self
class ViewPoint(Referenceable):
"""
I act as an indirect reference to an object accessed through a
L{pb.Perspective}.
Simply put, I combine an object with a perspective so that when a
peer calls methods on the object I refer to, the method will be
invoked with that perspective as a first argument, so that it can
know who is calling it.
While L{Viewable} objects will be converted to ViewPoints by default
when they are returned from or sent as arguments to a remote
method, any object may be manually proxied as well. (XXX: Now that
    this class is no longer named C{Proxy}, this is the only occurrence
of the term 'proxied' in this docstring, and may be unclear.)
This can be useful when dealing with L{pb.Perspective}s, L{Copyable}s,
and L{Cacheable}s. It is legal to implement a method as such on
a perspective::
| def perspective_getViewPointForOther(self, name):
| defr = self.service.getPerspectiveRequest(name)
| defr.addCallbacks(lambda x, self=self: ViewPoint(self, x), log.msg)
| return defr
This will allow you to have references to Perspective objects in two
different ways. One is through the initial 'attach' call -- each
peer will have a L{pb.RemoteReference} to their perspective directly. The
other is through this method; each peer can get a L{pb.RemoteReference} to
all other perspectives in the service; but that L{pb.RemoteReference} will
be to a L{ViewPoint}, not directly to the object.
The practical offshoot of this is that you can implement 2 varieties
of remotely callable methods on this Perspective; view_xxx and
C{perspective_xxx}. C{view_xxx} methods will follow the rules for
ViewPoint methods (see ViewPoint.L{remoteMessageReceived}), and
C{perspective_xxx} methods will follow the rules for Perspective
methods.
"""
def __init__(self, perspective, object):
"""Initialize me with a Perspective and an Object.
"""
self.perspective = perspective
self.object = object
def processUniqueID(self):
"""Return an ID unique to a proxy for this perspective+object combination.
"""
return (id(self.perspective), id(self.object))
def remoteMessageReceived(self, broker, message, args, kw):
"""A remote message has been received. Dispatch it appropriately.
The default implementation is to dispatch to a method called
'C{view_messagename}' to my Object and call it on my object with
the same arguments, modified by inserting my Perspective as
the first argument.
"""
args = broker.unserialize(args, self.perspective)
kw = broker.unserialize(kw, self.perspective)
method = getattr(self.object, "view_%s" % message)
try:
state = apply(method, (self.perspective,)+args, kw)
except TypeError:
log.msg("%s didn't accept %s and %s" % (method, args, kw))
raise
rv = broker.serialize(state, self.perspective, method, args, kw)
return rv
class Viewable(Serializable):
"""I will be converted to a L{ViewPoint} when passed to or returned from a remote method.
The beginning of a peer's interaction with a PB Service is always
through a perspective. However, if a C{perspective_xxx} method returns
a Viewable, it will be serialized to the peer as a response to that
method.
"""
def jellyFor(self, jellier):
"""Serialize a L{ViewPoint} for me and the perspective of the given broker.
"""
return ViewPoint(jellier.invoker.serializingPerspective, self).jellyFor(jellier)
class Copyable(Serializable):
"""Subclass me to get copied each time you are returned from or passed to a remote method.
When I am returned from or passed to a remote method call, I will be
converted into data via a set of callbacks (see my methods for more
info). That data will then be serialized using Jelly, and sent to
the peer.
The peer will then look up the type to represent this with; see
L{RemoteCopy} for details.
"""
def getStateToCopy(self):
"""Gather state to send when I am serialized for a peer.
I will default to returning self.__dict__. Override this to
customize this behavior.
"""
return self.__dict__
def getStateToCopyFor(self, perspective):
"""
Gather state to send when I am serialized for a particular
perspective.
I will default to calling L{getStateToCopy}. Override this to
customize this behavior.
"""
return self.getStateToCopy()
def getTypeToCopy(self):
"""Determine what type tag to send for me.
By default, send the string representation of my class
(package.module.Class); normally this is adequate, but
you may override this to change it.
"""
return reflect.qual(self.__class__)
def getTypeToCopyFor(self, perspective):
"""Determine what type tag to send for me.
By default, defer to self.L{getTypeToCopy}() normally this is
adequate, but you may override this to change it.
"""
return self.getTypeToCopy()
def jellyFor(self, jellier):
"""Assemble type tag and state to copy for this broker.
This will call L{getTypeToCopyFor} and L{getStateToCopy}, and
return an appropriate s-expression to represent me.
"""
if jellier.invoker is None:
return getInstanceState(self, jellier)
p = jellier.invoker.serializingPerspective
t = self.getTypeToCopyFor(p)
state = self.getStateToCopyFor(p)
sxp = jellier.prepare(self)
sxp.extend([t, jellier.jelly(state)])
return jellier.preserve(self, sxp)
class Cacheable(Copyable):
"""A cached instance.
This means that it's copied; but there is some logic to make sure
that it's only copied once. Additionally, when state is retrieved,
it is passed a "proto-reference" to the state as it will exist on
the client.
XXX: The documentation for this class needs work, but it's the most
complex part of PB and it is inherently difficult to explain.
"""
def getStateToCacheAndObserveFor(self, perspective, observer):
"""
Get state to cache on the client and client-cache reference
to observe locally.
        This is similar to getStateToCopyFor, but it additionally
passes in a reference to the client-side RemoteCache instance
that will be created when it is unserialized. This allows
Cacheable instances to keep their RemoteCaches up to date when
they change, such that no changes can occur between the point
at which the state is initially copied and the client receives
        it that are not propagated.
"""
return self.getStateToCopyFor(perspective)
def jellyFor(self, jellier):
"""Return an appropriate tuple to serialize me.
Depending on whether this broker has cached me or not, this may
return either a full state or a reference to an existing cache.
"""
if jellier.invoker is None:
return getInstanceState(self, jellier)
luid = jellier.invoker.cachedRemotelyAs(self, 1)
if luid is None:
luid = jellier.invoker.cacheRemotely(self)
p = jellier.invoker.serializingPerspective
type_ = self.getTypeToCopyFor(p)
observer = RemoteCacheObserver(jellier.invoker, self, p)
state = self.getStateToCacheAndObserveFor(p, observer)
l = jellier.prepare(self)
jstate = jellier.jelly(state)
l.extend([type_, luid, jstate])
return jellier.preserve(self, l)
else:
return cached_atom, luid
def stoppedObserving(self, perspective, observer):
"""This method is called when a client has stopped observing me.
The 'observer' argument is the same as that passed in to
getStateToCacheAndObserveFor.
"""
class RemoteCopy(Unjellyable):
"""I am a remote copy of a Copyable object.
When the state from a L{Copyable} object is received, an instance will
be created based on the copy tags table (see setUnjellyableForClass) and
sent the L{setCopyableState} message. I provide a reasonable default
implementation of that message; subclass me if you wish to serve as
a copier for remote data.
NOTE: copiers are invoked with no arguments. Do not implement a
constructor which requires args in a subclass of L{RemoteCopy}!
"""
def setCopyableState(self, state):
"""I will be invoked with the state to copy locally.
'state' is the data returned from the remote object's
'getStateToCopyFor' method, which will often be the remote
object's dictionary (or a filtered approximation of it depending
on my peer's perspective).
"""
self.__dict__ = state
def unjellyFor(self, unjellier, jellyList):
if unjellier.invoker is None:
return setInstanceState(self, unjellier, jellyList)
self.setCopyableState(unjellier.unjelly(jellyList[1]))
return self
class RemoteCache(RemoteCopy, Serializable):
"""A cache is a local representation of a remote L{Cacheable} object.
This represents the last known state of this object. It may
also have methods invoked on it -- in order to update caches,
the cached class generates a L{pb.RemoteReference} to this object as
it is originally sent.
Much like copy, I will be invoked with no arguments. Do not
implement a constructor that requires arguments in one of my
subclasses.
"""
def remoteMessageReceived(self, broker, message, args, kw):
"""A remote message has been received. Dispatch it appropriately.
The default implementation is to dispatch to a method called
'C{observe_messagename}' and call it on my with the same arguments.
"""
args = broker.unserialize(args)
kw = broker.unserialize(kw)
method = getattr(self, "observe_%s" % message)
try:
state = apply(method, args, kw)
except TypeError:
log.msg("%s didn't accept %s and %s" % (method, args, kw))
raise
return broker.serialize(state, None, method, args, kw)
def jellyFor(self, jellier):
"""serialize me (only for the broker I'm for) as the original cached reference
"""
if jellier.invoker is None:
return getInstanceState(self, jellier)
assert jellier.invoker is self.broker, "You cannot exchange cached proxies between brokers."
return 'lcache', self.luid
def unjellyFor(self, unjellier, jellyList):
if unjellier.invoker is None:
return setInstanceState(self, unjellier, jellyList)
self.broker = unjellier.invoker
self.luid = jellyList[1]
if isinstance(self.__class__, type): #new-style class
cProxy = _DummyNewStyle()
else:
cProxy = _Dummy()
cProxy.__class__ = self.__class__
cProxy.__dict__ = self.__dict__
# XXX questionable whether this was a good design idea...
init = getattr(cProxy, "__init__", None)
if init:
init()
unjellier.invoker.cacheLocally(jellyList[1], self)
cProxy.setCopyableState(unjellier.unjelly(jellyList[2]))
# Might have changed due to setCopyableState method; we'll assume that
# it's bad form to do so afterwards.
self.__dict__ = cProxy.__dict__
# chomp, chomp -- some existing code uses "self.__dict__ =", some uses
# "__dict__.update". This is here in order to handle both cases.
self.broker = unjellier.invoker
self.luid = jellyList[1]
return cProxy
## def __really_del__(self):
## """Final finalization call, made after all remote references have been lost.
## """
def __cmp__(self, other):
"""Compare me [to another RemoteCache.
"""
if isinstance(other, self.__class__):
return cmp(id(self.__dict__), id(other.__dict__))
else:
return cmp(id(self.__dict__), other)
def __hash__(self):
"""Hash me.
"""
return int(id(self.__dict__) % sys.maxint)
broker = None
luid = None
def __del__(self):
"""Do distributed reference counting on finalize.
"""
try:
# log.msg( ' --- decache: %s %s' % (self, self.luid) )
if self.broker:
self.broker.decCacheRef(self.luid)
except:
log.deferr()
def unjellyCached(unjellier, unjellyList):
luid = unjellyList[1]
cNotProxy = unjellier.invoker.cachedLocallyAs(luid)
cProxy = _Dummy()
cProxy.__class__ = cNotProxy.__class__
cProxy.__dict__ = cNotProxy.__dict__
return cProxy
setUnjellyableForClass("cached", unjellyCached)
def unjellyLCache(unjellier, unjellyList):
luid = unjellyList[1]
obj = unjellier.invoker.remotelyCachedForLUID(luid)
return obj
setUnjellyableForClass("lcache", unjellyLCache)
def unjellyLocal(unjellier, unjellyList):
obj = unjellier.invoker.localObjectForID(unjellyList[1])
return obj
setUnjellyableForClass("local", unjellyLocal)
class RemoteCacheMethod:
"""A method on a reference to a L{RemoteCache}.
"""
def __init__(self, name, broker, cached, perspective):
"""(internal) initialize.
"""
self.name = name
self.broker = broker
self.perspective = perspective
self.cached = cached
def __cmp__(self, other):
return cmp((self.name, self.broker, self.perspective, self.cached), other)
def __hash__(self):
return hash((self.name, self.broker, self.perspective, self.cached))
def __call__(self, *args, **kw):
"""(internal) action method.
"""
cacheID = self.broker.cachedRemotelyAs(self.cached)
if cacheID is None:
from pb import ProtocolError
raise ProtocolError("You can't call a cached method when the object hasn't been given to the peer yet.")
return self.broker._sendMessage('cache', self.perspective, cacheID, self.name, args, kw)
class RemoteCacheObserver:
"""I am a reverse-reference to the peer's L{RemoteCache}.
I am generated automatically when a cache is serialized. I
represent a reference to the client's L{RemoteCache} object that
will represent a particular L{Cacheable}; I am the additional
object passed to getStateToCacheAndObserveFor.
"""
def __init__(self, broker, cached, perspective):
"""(internal) Initialize me.
@param broker: a L{pb.Broker} instance.
@param cached: a L{Cacheable} instance that this L{RemoteCacheObserver}
corresponds to.
@param perspective: a reference to the perspective who is observing this.
"""
self.broker = broker
self.cached = cached
self.perspective = perspective
def __repr__(self):
return "<RemoteCacheObserver(%s, %s, %s) at %s>" % (
self.broker, self.cached, self.perspective, id(self))
def __hash__(self):
"""Generate a hash unique to all L{RemoteCacheObserver}s for this broker/perspective/cached triplet
"""
return ( (hash(self.broker) % 2**10)
+ (hash(self.perspective) % 2**10)
+ (hash(self.cached) % 2**10))
def __cmp__(self, other):
"""Compare me to another L{RemoteCacheObserver}.
"""
return cmp((self.broker, self.perspective, self.cached), other)
def callRemote(self, _name, *args, **kw):
"""(internal) action method.
"""
cacheID = self.broker.cachedRemotelyAs(self.cached)
if cacheID is None:
from pb import ProtocolError
raise ProtocolError("You can't call a cached method when the "
"object hasn't been given to the peer yet.")
return self.broker._sendMessage('cache', self.perspective, cacheID,
_name, args, kw)
def remoteMethod(self, key):
"""Get a L{pb.RemoteMethod} for this key.
"""
return RemoteCacheMethod(key, self.broker, self.cached, self.perspective)
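# --- Illustrative sketch (editor's addition, not part of the original module) ---
# A minimal Copyable/RemoteCopy pairing of the kind the docstrings above
# describe.  The class names are invented for the example; only Copyable,
# RemoteCopy, reflect and setUnjellyableForClass come from this module and
# its imports.
class _ExamplePond(Copyable):
    """Toy sender-side object whose state is copied to the peer."""
    def __init__(self, numFrogs):
        self.numFrogs = numFrogs
    def getStateToCopy(self):
        # Send only the state the peer needs, not the whole __dict__.
        return {'frogs': self.numFrogs}
class _ExampleRemotePond(RemoteCopy):
    """Toy receiver-side representation built from the copied state."""
    def setCopyableState(self, state):
        self.numFrogs = state['frogs']
# The receiving side would normally register the pairing, e.g.:
# setUnjellyableForClass(reflect.qual(_ExamplePond), _ExampleRemotePond)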
| apache-2.0 | 1,825,073,701,295,633,200 | -4,310,698,178,370,034,000 | 35.342809 | 120 | 0.661068 | false |
NLnetLabs/ldns | contrib/python/examples/ldns-signzone.py | 9 | 1609 | #!/usr/bin/python
# This example shows how to sign a given zone file with private key
import ldns
import sys, os, time
#private key TAG which identifies the private key
#use ldns-keygen.py in order to obtain private key
keytag = 30761
# Read zone file
#-------------------------------------------------------------
zone = ldns.ldns_zone.new_frm_fp(open("zone.txt","r"), None, 0, ldns.LDNS_RR_CLASS_IN)
soa = zone.soa()
origin = soa.owner()
# Prepare keys
#-------------------------------------------------------------
#Read private key from file
keyfile = open("key-%s-%d.private" % (origin, keytag), "r");
key = ldns.ldns_key.new_frm_fp(keyfile)
#Read public key from file
pubfname = "key-%s-%d.key" % (origin, keytag)
pubkey = None
if os.path.isfile(pubfname):
pubkeyfile = open(pubfname, "r");
pubkey,_,_,_ = ldns.ldns_rr.new_frm_fp(pubkeyfile)
if not pubkey:
#Create new public key
pubkey = key.key_to_rr()
#Set key expiration
key.set_expiration(int(time.time()) + 365*60*60*24) #365 days
#Set key owner (important step)
key.set_pubkey_owner(origin)
#Insert DNSKEY RR
zone.push_rr(pubkey)
# Sign zone
#-------------------------------------------------------------
#Create keylist and push private key
keys = ldns.ldns_key_list()
keys.push_key(key)
#Add SOA
signed_zone = ldns.ldns_dnssec_zone()
signed_zone.add_rr(soa)
#Add RRs
for rr in zone.rrs().rrs():
print "RR:",str(rr),
signed_zone.add_rr(rr)
added_rrs = ldns.ldns_rr_list()
status = signed_zone.sign(added_rrs, keys)
if (status == ldns.LDNS_STATUS_OK):
signed_zone.print_to_file(open("zone_signed.txt","w"))
| bsd-3-clause | -292,735,110,945,257,340 | 826,112,696,256,279,700 | 23.753846 | 86 | 0.612181 | false |
juanalfonsopr/odoo | addons/sale/sales_team.py | 171 | 4218 | # -*- coding: utf-8 -*-
import calendar
from datetime import date
from dateutil import relativedelta
import json
from openerp import tools
from openerp.osv import fields, osv
from openerp.tools.float_utils import float_repr
class crm_case_section(osv.osv):
_inherit = 'crm.case.section'
def _get_sale_orders_data(self, cr, uid, ids, field_name, arg, context=None):
obj = self.pool['sale.order']
month_begin = date.today().replace(day=1)
date_begin = (month_begin - relativedelta.relativedelta(months=self._period_number - 1)).strftime(tools.DEFAULT_SERVER_DATE_FORMAT)
date_end = month_begin.replace(day=calendar.monthrange(month_begin.year, month_begin.month)[1]).strftime(tools.DEFAULT_SERVER_DATE_FORMAT)
res = {}
for id in ids:
res[id] = {}
created_domain = [('section_id', '=', id), ('state', '=', 'draft'), ('date_order', '>=', date_begin), ('date_order', '<=', date_end)]
validated_domain = [('section_id', '=', id), ('state', 'not in', ['draft', 'sent', 'cancel']), ('date_order', '>=', date_begin), ('date_order', '<=', date_end)]
res[id]['monthly_quoted'] = json.dumps(self.__get_bar_values(cr, uid, obj, created_domain, ['amount_total', 'date_order'], 'amount_total', 'date_order', context=context))
res[id]['monthly_confirmed'] = json.dumps(self.__get_bar_values(cr, uid, obj, validated_domain, ['amount_untaxed', 'date_order'], 'amount_untaxed', 'date_order', context=context))
return res
def _get_invoices_data(self, cr, uid, ids, field_name, arg, context=None):
obj = self.pool['account.invoice.report']
month_begin = date.today().replace(day=1)
date_begin = (month_begin - relativedelta.relativedelta(months=self._period_number - 1)).strftime(tools.DEFAULT_SERVER_DATE_FORMAT)
date_end = month_begin.replace(day=calendar.monthrange(month_begin.year, month_begin.month)[1]).strftime(tools.DEFAULT_SERVER_DATE_FORMAT)
res = {}
for id in ids:
created_domain = [('type', 'in', ['out_invoice', 'out_refund']), ('section_id', '=', id), ('state', 'not in', ['draft', 'cancel']), ('date', '>=', date_begin), ('date', '<=', date_end)]
values = self.__get_bar_values(cr, uid, obj, created_domain, ['price_total', 'date'], 'price_total', 'date', context=context)
for value in values:
value['value'] = float_repr(value.get('value', 0), precision_digits=self.pool['decimal.precision'].precision_get(cr, uid, 'Account'))
res[id] = json.dumps(values)
return res
_columns = {
'use_quotations': fields.boolean('Quotations', help="Check this box to manage quotations in this sales team."),
'invoiced_forecast': fields.integer(string='Invoice Forecast',
help="Forecast of the invoice revenue for the current month. This is the amount the sales \n"
"team should invoice this month. It is used to compute the progression ratio \n"
" of the current and forecast revenue on the kanban view."),
'invoiced_target': fields.integer(string='Invoice Target',
help="Target of invoice revenue for the current month. This is the amount the sales \n"
"team estimates to be able to invoice this month."),
'monthly_quoted': fields.function(_get_sale_orders_data,
type='char', readonly=True, multi='_get_sale_orders_data',
string='Rate of created quotation per duration'),
'monthly_confirmed': fields.function(_get_sale_orders_data,
type='char', readonly=True, multi='_get_sale_orders_data',
string='Rate of validate sales orders per duration'),
'monthly_invoiced': fields.function(_get_invoices_data,
type='char', readonly=True,
string='Rate of sent invoices per duration'),
}
_defaults = {
'use_quotations': True,
}
def action_forecast(self, cr, uid, id, value, context=None):
return self.write(cr, uid, [id], {'invoiced_forecast': round(float(value))}, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -5,645,121,444,455,202,000 | -7,178,632,076,342,988,000 | 57.583333 | 197 | 0.628971 | false |
BorgERP/borg-erp-6of3 | addons/event_project/event_project.py | 9 | 2565 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import fields, osv
class one2many_mod_task(fields.one2many):
def get(self, cr, obj, ids, name, user=None, offset=0, context=None, values=None):
if not values:
values = {}
res = {}
for id in ids:
res[id] = []
for id in ids:
query = "select project_id from event_event where id = %s"
cr.execute(query, (id,))
project_ids = [ x[0] for x in cr.fetchall()]
ids2 = obj.pool.get(self._obj).search(cr, user, [(self._fields_id, 'in', project_ids), ('state', '<>', 'done')], limit=self._limit)
for r in obj.pool.get(self._obj)._read_flat(cr, user, ids2, [self._fields_id], context=context, load='_classic_write'):
res[id].append( r['id'] )
return res
class event(osv.osv):
_inherit = 'event.event'
def write(self, cr, uid, ids, vals, *args, **kwargs):
if 'date_begin' in vals and vals['date_begin']:
for eve in self.browse(cr, uid, ids):
if eve.project_id:
self.pool.get('project.project').write(cr, uid, [eve.project_id.id], {'date_end': eve.date_begin[:10]})
return super(event,self).write(cr, uid, ids, vals, *args, **kwargs)
_columns = {
'project_id': fields.many2one('project.project', 'Project', readonly=True),
'task_ids': one2many_mod_task('project.task', 'project_id', "Project tasks", readonly=True, domain="[('state', '<>', 'done')]"),
}
event()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | agpl-3.0 | 5,216,897,452,975,714,000 | -4,565,723,588,257,587,000 | 43.241379 | 143 | 0.576998 | false |
mikjo/bigitr | unit_test/daemonconfig_test.py | 1 | 4363 | #
# Copyright 2012 SAS Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from cStringIO import StringIO
import tempfile
import testutils
from bigitr import daemonconfig
class TestDaemonConfig(testutils.TestCase):
def setUp(self):
self.dir = tempfile.mkdtemp(suffix='.bigitr')
os.environ['DDIR'] = self.dir
daemonConfig = self.dir + '/daemon'
file(daemonConfig, 'w').write('''
[GLOBAL]
appconfig = ${DDIR}/app
[foo]
repoconfig = ${DDIR}/foo1.* ${DDIR}/foo2.*
[bar]
appconfig = ${DDIR}/app2
repoconfig = ${DDIR}/bar
email = other@other blah@blah
''')
self.cfg = daemonconfig.DaemonConfig(daemonConfig)
def tearDown(self):
self.removeRecursive(self.dir)
os.unsetenv('DDIR')
def test_parallelConversions(self):
self.assertEqual(1, self.cfg.parallelConversions())
self.cfg.set('GLOBAL', 'parallel', '8')
self.assertEqual(8, self.cfg.parallelConversions())
def test_getPollFrequency(self):
self.assertEqual(300, self.cfg.getPollFrequency())
self.cfg.set('GLOBAL', 'pollfrequency', '1h')
self.assertEqual(3600, self.cfg.getPollFrequency())
def test_getFullSyncFrequency(self):
self.assertEqual(86000, self.cfg.getFullSyncFrequency())
self.cfg.set('GLOBAL', 'syncfrequency', '1h')
self.assertEqual(3600, self.cfg.getFullSyncFrequency())
def test_getEmail(self):
self.assertEqual(None, self.cfg.getEmail())
self.cfg.set('GLOBAL', 'email', 'here@here')
self.assertEqual(['here@here'], self.cfg.getEmail())
def test_getMailFrom(self):
self.assertEqual(None, self.cfg.getMailFrom())
self.cfg.set('GLOBAL', 'mailfrom', 'noreply@here')
self.assertEqual('noreply@here', self.cfg.getMailFrom())
def test_getMailAll(self):
self.assertFalse(self.cfg.getMailAll())
self.cfg.set('GLOBAL', 'mailall', 'true')
self.assertTrue(self.cfg.getMailAll())
def test_getSmartHost(self):
self.assertEqual('localhost', self.cfg.getSmartHost())
self.cfg.set('GLOBAL', 'smarthost', 'foo')
self.assertEqual('foo', self.cfg.getSmartHost())
def test_getApplicationContexts(self):
self.assertEqual(set(('foo', 'bar')), self.cfg.getApplicationContexts())
def test_getAppConfig(self):
self.assertEqual(self.dir + '/app', self.cfg.getAppConfig('foo'))
self.assertEqual(self.dir + '/app2', self.cfg.getAppConfig('bar'))
def test_getRepoConfigs(self):
# files have to exist to be globbed
file(self.dir + '/foo1.1', 'w')
file(self.dir + '/foo1.2', 'w')
file(self.dir + '/foo2.1', 'w')
file(self.dir + '/bar', 'w')
self.assertEqual([self.dir + '/foo1.1',
self.dir + '/foo1.2',
self.dir + '/foo2.1'],
self.cfg.getRepoConfigs('foo'))
self.assertEqual([self.dir + '/bar'], self.cfg.getRepoConfigs('bar'))
def test_parseTimeSpec(self):
self.assertEqual(3600, self.cfg._parseTimeSpec('1h'))
self.assertEqual(3600, self.cfg._parseTimeSpec('1H'))
self.assertEqual(60, self.cfg._parseTimeSpec('1m'))
self.assertEqual(60, self.cfg._parseTimeSpec('1M'))
self.assertEqual(1, self.cfg._parseTimeSpec('1s'))
self.assertEqual(1, self.cfg._parseTimeSpec('1S'))
self.assertEqual(1, self.cfg._parseTimeSpec('1'))
self.assertEqual(3661, self.cfg._parseTimeSpec('1h1m1'))
self.assertEqual(3612, self.cfg._parseTimeSpec('1h12'))
self.assertEqual(3661, self.cfg._parseTimeSpec('1h1m1s'))
self.assertEqual(3661, self.cfg._parseTimeSpec('1h 1m 1s'))
self.assertEqual(3661, self.cfg._parseTimeSpec('1h 1m 1s '))
self.assertEqual(3661, self.cfg._parseTimeSpec(' 1h 1m 1s '))
| apache-2.0 | 5,895,474,036,833,886,000 | -5,545,853,957,760,487,000 | 37.610619 | 80 | 0.65047 | false |
freakboy3742/pyxero | xero/constants.py | 3 | 2057 | XERO_BASE_URL = "https://api.xero.com"
REQUEST_TOKEN_URL = "/oauth/RequestToken"
AUTHORIZE_URL = "/oauth/Authorize"
ACCESS_TOKEN_URL = "/oauth/AccessToken"
XERO_API_URL = "/api.xro/2.0"
XERO_FILES_URL = "/files.xro/1.0"
XERO_PAYROLL_URL = "/payroll.xro/1.0"
XERO_PROJECTS_URL = "/projects.xro/2.0"
XERO_OAUTH2_AUTHORIZE_URL = "https://login.xero.com/identity/connect/authorize"
XERO_OAUTH2_TOKEN_URL = "https://identity.xero.com/connect/token"
XERO_OAUTH2_CONNECTIONS_URL = "/connections"
class XeroScopes:
# Offline Access
OFFLINE_ACCESS = "offline_access"
# OpenID connection
OPENID = "openid"
PROFILE = "profile"
EMAIL = "email"
# Accounting API
ACCOUNTING_TRANSACTIONS = "accounting.transactions"
ACCOUNTING_TRANSACTIONS_READ = "accounting.transactions.read"
ACCOUNTING_REPORTS_READ = "accounting.reports.read"
ACCOUNTING_JOURNALS_READ = "accounting.journals.read"
ACCOUNTING_SETTINGS = "accounting.settings"
ACCOUNTING_SETTINGS_READ = "accounting.settings.read"
ACCOUNTING_CONTACTS = "accounting.contacts"
ACCOUNTING_CONTACTS_READ = "accounting.contacts.read"
ACCOUNTING_ATTACHMENTS = "accounting.attachments"
ACCOUNTING_ATTACHMENTS_READ = "accounting.attachments.read"
# Payroll API
PAYROLL_EMPLOYEES = "payroll.employees"
PAYROLL_EMPLOYEES_READ = "payroll.employees.read"
PAYROLL_PAYRUNS = "payroll.payruns"
PAYROLL_PAYRUNS_READ = "payroll.payruns.read"
PAYROLL_PAYSLIP = "payroll.payslip"
PAYROLL_PAYSLIP_READ = "payroll.payslip.read"
PAYROLL_TIMESHEETS = "payroll.timesheets"
PAYROLL_TIMESHEETS_READ = "payroll.timesheets.read"
PAYROLL_SETTINGS = "payroll.settings"
PAYROLL_SETTINGS_READ = "payroll.settings.read"
# Files API
FILES = "files"
FILES_READ = "files.read"
    # Assets API
ASSETS = "assets"
ASSETS_READ = "assets.read"
# Projects API
PROJECTS = "projects"
PROJECTS_READ = "projects.read"
# Restricted Scopes
PAYMENTSERVICES = "paymentservices"
BANKFEEDS = "bankfeeds"
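# --- Illustrative sketch (editor's addition) ---
# Scopes are plain strings; an OAuth2 credentials object is typically given a
# selection of them as a list (or a space-separated string).  The name below
# is invented for the example and is not used elsewhere in this package.
EXAMPLE_ACCOUNTING_SCOPES = [
    XeroScopes.OFFLINE_ACCESS,
    XeroScopes.OPENID,
    XeroScopes.PROFILE,
    XeroScopes.EMAIL,
    XeroScopes.ACCOUNTING_TRANSACTIONS,
    XeroScopes.ACCOUNTING_CONTACTS_READ,
]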
| bsd-3-clause | 3,351,941,168,877,168,600 | -5,193,972,149,408,233,000 | 31.650794 | 79 | 0.707827 | false |
shangwuhencc/scikit-learn | examples/cluster/plot_kmeans_assumptions.py | 270 | 2040 | """
====================================
Demonstration of k-means assumptions
====================================
This example is meant to illustrate situations where k-means will produce
unintuitive and possibly unexpected clusters. In the first three plots, the
input data does not conform to some implicit assumption that k-means makes and
undesirable clusters are produced as a result. In the last plot, k-means
returns intuitive clusters despite unevenly sized blobs.
"""
print(__doc__)
# Author: Phil Roth <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
plt.figure(figsize=(12, 12))
n_samples = 1500
random_state = 170
X, y = make_blobs(n_samples=n_samples, random_state=random_state)
# Incorrect number of clusters
y_pred = KMeans(n_clusters=2, random_state=random_state).fit_predict(X)
plt.subplot(221)
plt.scatter(X[:, 0], X[:, 1], c=y_pred)
plt.title("Incorrect Number of Blobs")
# Anisotropically distributed data
transformation = [[ 0.60834549, -0.63667341], [-0.40887718, 0.85253229]]
X_aniso = np.dot(X, transformation)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_aniso)
plt.subplot(222)
plt.scatter(X_aniso[:, 0], X_aniso[:, 1], c=y_pred)
plt.title("Anisotropicly Distributed Blobs")
# Different variance
X_varied, y_varied = make_blobs(n_samples=n_samples,
cluster_std=[1.0, 2.5, 0.5],
random_state=random_state)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_varied)
plt.subplot(223)
plt.scatter(X_varied[:, 0], X_varied[:, 1], c=y_pred)
plt.title("Unequal Variance")
# Unevenly sized blobs
X_filtered = np.vstack((X[y == 0][:500], X[y == 1][:100], X[y == 2][:10]))
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_filtered)
plt.subplot(224)
plt.scatter(X_filtered[:, 0], X_filtered[:, 1], c=y_pred)
plt.title("Unevenly Sized Blobs")
plt.show()
| bsd-3-clause | -1,204,208,751,331,382,300 | 3,073,736,327,553,794,600 | 31.380952 | 80 | 0.681373 | false |
xuru/pyvisdk | pyvisdk/do/host_network_traffic_shaping_policy.py | 1 | 1089 |
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def HostNetworkTrafficShapingPolicy(vim, *args, **kwargs):
'''This data object type describes traffic shaping policy.'''
obj = vim.client.factory.create('ns0:HostNetworkTrafficShapingPolicy')
# do some validation checking...
if (len(args) + len(kwargs)) < 0:
raise IndexError('Expected at least 1 arguments got: %d' % len(args))
required = [ ]
optional = [ 'averageBandwidth', 'burstSize', 'enabled', 'peakBandwidth', 'dynamicProperty',
'dynamicType' ]
for name, arg in zip(required+optional, args):
setattr(obj, name, arg)
for name, value in kwargs.items():
if name in required + optional:
setattr(obj, name, value)
else:
raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))
return obj
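# --- Illustrative sketch (editor's addition) ---
def _example_shaping_policy(vim):
    # Hedged example: 'vim' is assumed to be an already-connected pyvisdk
    # service instance supplied by the caller; the keyword names mirror the
    # 'optional' list above, and the bandwidth values are arbitrary.
    return HostNetworkTrafficShapingPolicy(vim, enabled=True,
                                           averageBandwidth=102400,
                                           peakBandwidth=204800,
                                           burstSize=51200)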
| mit | -8,893,739,221,158,347,000 | -6,464,858,788,344,180,000 | 31.058824 | 124 | 0.606061 | false |
rogerhu/django | django/middleware/common.py | 11 | 7283 | import hashlib
import logging
import re
import warnings
from django.conf import settings
from django.core.mail import mail_managers
from django.core import urlresolvers
from django import http
from django.utils.encoding import force_text
from django.utils.http import urlquote
from django.utils import six
logger = logging.getLogger('django.request')
class CommonMiddleware(object):
"""
"Common" middleware for taking care of some basic operations:
- Forbids access to User-Agents in settings.DISALLOWED_USER_AGENTS
- URL rewriting: Based on the APPEND_SLASH and PREPEND_WWW settings,
this middleware appends missing slashes and/or prepends missing
"www."s.
- If APPEND_SLASH is set and the initial URL doesn't end with a
slash, and it is not found in urlpatterns, a new URL is formed by
appending a slash at the end. If this new URL is found in
urlpatterns, then an HTTP-redirect is returned to this new URL;
otherwise the initial URL is processed as usual.
- ETags: If the USE_ETAGS setting is set, ETags will be calculated from
the entire page content and Not Modified responses will be returned
appropriately.
"""
def process_request(self, request):
"""
Check for denied User-Agents and rewrite the URL based on
settings.APPEND_SLASH and settings.PREPEND_WWW
"""
# Check for denied User-Agents
if 'HTTP_USER_AGENT' in request.META:
for user_agent_regex in settings.DISALLOWED_USER_AGENTS:
if user_agent_regex.search(request.META['HTTP_USER_AGENT']):
logger.warning('Forbidden (User agent): %s', request.path,
extra={
'status_code': 403,
'request': request
}
)
return http.HttpResponseForbidden('<h1>Forbidden</h1>')
# Check for a redirect based on settings.APPEND_SLASH
# and settings.PREPEND_WWW
host = request.get_host()
old_url = [host, request.path]
new_url = old_url[:]
if (settings.PREPEND_WWW and old_url[0] and
not old_url[0].startswith('www.')):
new_url[0] = 'www.' + old_url[0]
# Append a slash if APPEND_SLASH is set and the URL doesn't have a
# trailing slash and there is no pattern for the current path
if settings.APPEND_SLASH and (not old_url[1].endswith('/')):
urlconf = getattr(request, 'urlconf', None)
if (not urlresolvers.is_valid_path(request.path_info, urlconf) and
urlresolvers.is_valid_path("%s/" % request.path_info, urlconf)):
new_url[1] = new_url[1] + '/'
if settings.DEBUG and request.method == 'POST':
raise RuntimeError((""
"You called this URL via POST, but the URL doesn't end "
"in a slash and you have APPEND_SLASH set. Django can't "
"redirect to the slash URL while maintaining POST data. "
"Change your form to point to %s%s (note the trailing "
"slash), or set APPEND_SLASH=False in your Django "
"settings.") % (new_url[0], new_url[1]))
if new_url == old_url:
# No redirects required.
return
if new_url[0]:
newurl = "%s://%s%s" % (
request.scheme,
new_url[0], urlquote(new_url[1]))
else:
newurl = urlquote(new_url[1])
if request.META.get('QUERY_STRING', ''):
if six.PY3:
newurl += '?' + request.META['QUERY_STRING']
else:
# `query_string` is a bytestring. Appending it to the unicode
# string `newurl` will fail if it isn't ASCII-only. This isn't
# allowed; only broken software generates such query strings.
# Better drop the invalid query string than crash (#15152).
try:
newurl += '?' + request.META['QUERY_STRING'].decode()
except UnicodeDecodeError:
pass
return http.HttpResponsePermanentRedirect(newurl)
def process_response(self, request, response):
"""
Calculate the ETag, if needed.
"""
if settings.SEND_BROKEN_LINK_EMAILS:
warnings.warn("SEND_BROKEN_LINK_EMAILS is deprecated. "
"Use BrokenLinkEmailsMiddleware instead.",
DeprecationWarning, stacklevel=2)
BrokenLinkEmailsMiddleware().process_response(request, response)
if settings.USE_ETAGS:
if response.has_header('ETag'):
etag = response['ETag']
elif response.streaming:
etag = None
else:
etag = '"%s"' % hashlib.md5(response.content).hexdigest()
if etag is not None:
if (200 <= response.status_code < 300
and request.META.get('HTTP_IF_NONE_MATCH') == etag):
cookies = response.cookies
response = http.HttpResponseNotModified()
response.cookies = cookies
else:
response['ETag'] = etag
return response
class BrokenLinkEmailsMiddleware(object):
def process_response(self, request, response):
"""
Send broken link emails for relevant 404 NOT FOUND responses.
"""
if response.status_code == 404 and not settings.DEBUG:
domain = request.get_host()
path = request.get_full_path()
referer = force_text(request.META.get('HTTP_REFERER', ''), errors='replace')
if not self.is_ignorable_request(request, path, domain, referer):
ua = request.META.get('HTTP_USER_AGENT', '<none>')
ip = request.META.get('REMOTE_ADDR', '<none>')
mail_managers(
"Broken %slink on %s" % (
('INTERNAL ' if self.is_internal_request(domain, referer) else ''),
domain
),
"Referrer: %s\nRequested URL: %s\nUser agent: %s\n"
"IP address: %s\n" % (referer, path, ua, ip),
fail_silently=True)
return response
def is_internal_request(self, domain, referer):
"""
Returns True if the referring URL is the same domain as the current request.
"""
# Different subdomains are treated as different domains.
return bool(re.match("^https?://%s/" % re.escape(domain), referer))
def is_ignorable_request(self, request, uri, domain, referer):
"""
Returns True if the given request *shouldn't* notify the site managers.
"""
# '?' in referer is identified as search engine source
if (not referer or
(not self.is_internal_request(domain, referer) and '?' in referer)):
return True
return any(pattern.search(uri) for pattern in settings.IGNORABLE_404_URLS)
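# --- Illustrative configuration sketch (editor's addition) ---
# Both middleware classes above are activated from project settings; the
# values below are only an example, not defaults defined by this module.
#
#     MIDDLEWARE_CLASSES = (
#         'django.middleware.common.CommonMiddleware',
#         'django.middleware.common.BrokenLinkEmailsMiddleware',
#     )
#     APPEND_SLASH = True
#     PREPEND_WWW = False
#     USE_ETAGS = True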
| bsd-3-clause | -7,535,753,404,052,222,000 | -8,748,887,208,997,314,000 | 40.856322 | 91 | 0.563504 | false |
shail2810/nova | nova/tests/functional/test_servers.py | 31 | 19364 | # Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import time
import zlib
from oslo_log import log as logging
from oslo_utils import timeutils
from nova import context
from nova import exception
from nova.tests.functional.api import client
from nova.tests.functional import integrated_helpers
from nova.tests.unit import fake_network
import nova.virt.fake
LOG = logging.getLogger(__name__)
class ServersTestBase(integrated_helpers._IntegratedTestBase):
_api_version = 'v2'
_force_delete_parameter = 'forceDelete'
_image_ref_parameter = 'imageRef'
_flavor_ref_parameter = 'flavorRef'
_access_ipv4_parameter = 'accessIPv4'
_access_ipv6_parameter = 'accessIPv6'
_return_resv_id_parameter = 'return_reservation_id'
_min_count_parameter = 'min_count'
def setUp(self):
super(ServersTestBase, self).setUp()
self.conductor = self.start_service(
'conductor', manager='nova.conductor.manager.ConductorManager')
def _wait_for_state_change(self, server, from_status):
for i in range(0, 50):
server = self.api.get_server(server['id'])
if server['status'] != from_status:
break
time.sleep(.1)
return server
def _restart_compute_service(self, *args, **kwargs):
"""restart compute service. NOTE: fake driver forgets all instances."""
self.compute.kill()
self.compute = self.start_service('compute', *args, **kwargs)
def _wait_for_deletion(self, server_id):
# Wait (briefly) for deletion
for _retries in range(50):
try:
found_server = self.api.get_server(server_id)
except client.OpenStackApiNotFoundException:
found_server = None
LOG.debug("Got 404, proceeding")
break
LOG.debug("Found_server=%s" % found_server)
# TODO(justinsb): Mock doesn't yet do accurate state changes
# if found_server['status'] != 'deleting':
# break
time.sleep(.1)
# Should be gone
self.assertFalse(found_server)
def _delete_server(self, server_id):
# Delete the server
self.api.delete_server(server_id)
self._wait_for_deletion(server_id)
def _get_access_ips_params(self):
return {self._access_ipv4_parameter: "172.19.0.2",
self._access_ipv6_parameter: "fe80::2"}
def _verify_access_ips(self, server):
self.assertEqual('172.19.0.2',
server[self._access_ipv4_parameter])
self.assertEqual('fe80::2', server[self._access_ipv6_parameter])
class ServersTest(ServersTestBase):
def test_get_servers(self):
# Simple check that listing servers works.
servers = self.api.get_servers()
for server in servers:
LOG.debug("server: %s" % server)
def test_create_server_with_error(self):
# Create a server which will enter error state.
fake_network.set_stub_network_methods(self.stubs)
def throw_error(*args, **kwargs):
raise exception.BuildAbortException(reason='',
instance_uuid='fake')
self.stubs.Set(nova.virt.fake.FakeDriver, 'spawn', throw_error)
server = self._build_minimal_create_server_request()
created_server = self.api.post_server({"server": server})
created_server_id = created_server['id']
found_server = self.api.get_server(created_server_id)
self.assertEqual(created_server_id, found_server['id'])
found_server = self._wait_for_state_change(found_server, 'BUILD')
self.assertEqual('ERROR', found_server['status'])
self._delete_server(created_server_id)
def test_create_and_delete_server(self):
# Creates and deletes a server.
fake_network.set_stub_network_methods(self.stubs)
# Create server
# Build the server data gradually, checking errors along the way
server = {}
good_server = self._build_minimal_create_server_request()
post = {'server': server}
# Without an imageRef, this throws 500.
# TODO(justinsb): Check whatever the spec says should be thrown here
self.assertRaises(client.OpenStackApiException,
self.api.post_server, post)
# With an invalid imageRef, this throws 500.
server[self._image_ref_parameter] = self.get_invalid_image()
# TODO(justinsb): Check whatever the spec says should be thrown here
self.assertRaises(client.OpenStackApiException,
self.api.post_server, post)
# Add a valid imageRef
server[self._image_ref_parameter] = good_server.get(
self._image_ref_parameter)
# Without flavorRef, this throws 500
# TODO(justinsb): Check whatever the spec says should be thrown here
self.assertRaises(client.OpenStackApiException,
self.api.post_server, post)
server[self._flavor_ref_parameter] = good_server.get(
self._flavor_ref_parameter)
# Without a name, this throws 500
# TODO(justinsb): Check whatever the spec says should be thrown here
self.assertRaises(client.OpenStackApiException,
self.api.post_server, post)
# Set a valid server name
server['name'] = good_server['name']
created_server = self.api.post_server(post)
LOG.debug("created_server: %s" % created_server)
self.assertTrue(created_server['id'])
created_server_id = created_server['id']
# Check it's there
found_server = self.api.get_server(created_server_id)
self.assertEqual(created_server_id, found_server['id'])
# It should also be in the all-servers list
servers = self.api.get_servers()
server_ids = [s['id'] for s in servers]
self.assertIn(created_server_id, server_ids)
found_server = self._wait_for_state_change(found_server, 'BUILD')
# It should be available...
# TODO(justinsb): Mock doesn't yet do this...
self.assertEqual('ACTIVE', found_server['status'])
servers = self.api.get_servers(detail=True)
for server in servers:
self.assertIn("image", server)
self.assertIn("flavor", server)
self._delete_server(created_server_id)
def _force_reclaim(self):
# Make sure that compute manager thinks the instance is
# old enough to be expired
the_past = timeutils.utcnow() + datetime.timedelta(hours=1)
timeutils.set_time_override(override_time=the_past)
self.addCleanup(timeutils.clear_time_override)
ctxt = context.get_admin_context()
self.compute._reclaim_queued_deletes(ctxt)
def test_deferred_delete(self):
# Creates, deletes and waits for server to be reclaimed.
self.flags(reclaim_instance_interval=1)
fake_network.set_stub_network_methods(self.stubs)
# Create server
server = self._build_minimal_create_server_request()
created_server = self.api.post_server({'server': server})
LOG.debug("created_server: %s" % created_server)
self.assertTrue(created_server['id'])
created_server_id = created_server['id']
# Wait for it to finish being created
found_server = self._wait_for_state_change(created_server, 'BUILD')
# It should be available...
self.assertEqual('ACTIVE', found_server['status'])
# Cannot restore unless instance is deleted
self.assertRaises(client.OpenStackApiException,
self.api.post_server_action, created_server_id,
{'restore': {}})
# Delete the server
self.api.delete_server(created_server_id)
# Wait for queued deletion
found_server = self._wait_for_state_change(found_server, 'ACTIVE')
self.assertEqual('SOFT_DELETED', found_server['status'])
self._force_reclaim()
# Wait for real deletion
self._wait_for_deletion(created_server_id)
def test_deferred_delete_restore(self):
# Creates, deletes and restores a server.
self.flags(reclaim_instance_interval=3600)
fake_network.set_stub_network_methods(self.stubs)
# Create server
server = self._build_minimal_create_server_request()
created_server = self.api.post_server({'server': server})
LOG.debug("created_server: %s" % created_server)
self.assertTrue(created_server['id'])
created_server_id = created_server['id']
# Wait for it to finish being created
found_server = self._wait_for_state_change(created_server, 'BUILD')
# It should be available...
self.assertEqual('ACTIVE', found_server['status'])
# Delete the server
self.api.delete_server(created_server_id)
# Wait for queued deletion
found_server = self._wait_for_state_change(found_server, 'ACTIVE')
self.assertEqual('SOFT_DELETED', found_server['status'])
# Restore server
self.api.post_server_action(created_server_id, {'restore': {}})
# Wait for server to become active again
found_server = self._wait_for_state_change(found_server, 'DELETED')
self.assertEqual('ACTIVE', found_server['status'])
def test_deferred_delete_force(self):
# Creates, deletes and force deletes a server.
self.flags(reclaim_instance_interval=3600)
fake_network.set_stub_network_methods(self.stubs)
# Create server
server = self._build_minimal_create_server_request()
created_server = self.api.post_server({'server': server})
LOG.debug("created_server: %s" % created_server)
self.assertTrue(created_server['id'])
created_server_id = created_server['id']
# Wait for it to finish being created
found_server = self._wait_for_state_change(created_server, 'BUILD')
# It should be available...
self.assertEqual('ACTIVE', found_server['status'])
# Delete the server
self.api.delete_server(created_server_id)
# Wait for queued deletion
found_server = self._wait_for_state_change(found_server, 'ACTIVE')
self.assertEqual('SOFT_DELETED', found_server['status'])
# Force delete server
self.api.post_server_action(created_server_id,
{self._force_delete_parameter: {}})
# Wait for real deletion
self._wait_for_deletion(created_server_id)
def test_create_server_with_metadata(self):
# Creates a server with metadata.
fake_network.set_stub_network_methods(self.stubs)
# Build the server data gradually, checking errors along the way
server = self._build_minimal_create_server_request()
metadata = {}
for i in range(30):
metadata['key_%s' % i] = 'value_%s' % i
server['metadata'] = metadata
post = {'server': server}
created_server = self.api.post_server(post)
LOG.debug("created_server: %s" % created_server)
self.assertTrue(created_server['id'])
created_server_id = created_server['id']
found_server = self.api.get_server(created_server_id)
self.assertEqual(created_server_id, found_server['id'])
self.assertEqual(metadata, found_server.get('metadata'))
# The server should also be in the all-servers details list
servers = self.api.get_servers(detail=True)
server_map = {server['id']: server for server in servers}
found_server = server_map.get(created_server_id)
self.assertTrue(found_server)
# Details do include metadata
self.assertEqual(metadata, found_server.get('metadata'))
# The server should also be in the all-servers summary list
servers = self.api.get_servers(detail=False)
server_map = {server['id']: server for server in servers}
found_server = server_map.get(created_server_id)
self.assertTrue(found_server)
# Summary should not include metadata
self.assertFalse(found_server.get('metadata'))
# Cleanup
self._delete_server(created_server_id)
def test_create_and_rebuild_server(self):
# Rebuild a server with metadata.
fake_network.set_stub_network_methods(self.stubs)
# create a server with initially has no metadata
server = self._build_minimal_create_server_request()
server_post = {'server': server}
metadata = {}
for i in range(30):
metadata['key_%s' % i] = 'value_%s' % i
server_post['server']['metadata'] = metadata
created_server = self.api.post_server(server_post)
LOG.debug("created_server: %s" % created_server)
self.assertTrue(created_server['id'])
created_server_id = created_server['id']
created_server = self._wait_for_state_change(created_server, 'BUILD')
# rebuild the server with metadata and other server attributes
post = {}
post['rebuild'] = {
self._image_ref_parameter: "76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
"name": "blah",
self._access_ipv4_parameter: "172.19.0.2",
self._access_ipv6_parameter: "fe80::2",
"metadata": {'some': 'thing'},
}
post['rebuild'].update(self._get_access_ips_params())
self.api.post_server_action(created_server_id, post)
LOG.debug("rebuilt server: %s" % created_server)
self.assertTrue(created_server['id'])
found_server = self.api.get_server(created_server_id)
self.assertEqual(created_server_id, found_server['id'])
self.assertEqual({'some': 'thing'}, found_server.get('metadata'))
self.assertEqual('blah', found_server.get('name'))
self.assertEqual(post['rebuild'][self._image_ref_parameter],
found_server.get('image')['id'])
self._verify_access_ips(found_server)
# rebuild the server with empty metadata and nothing else
post = {}
post['rebuild'] = {
self._image_ref_parameter: "76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
"metadata": {},
}
self.api.post_server_action(created_server_id, post)
LOG.debug("rebuilt server: %s" % created_server)
self.assertTrue(created_server['id'])
found_server = self.api.get_server(created_server_id)
self.assertEqual(created_server_id, found_server['id'])
self.assertEqual({}, found_server.get('metadata'))
self.assertEqual('blah', found_server.get('name'))
self.assertEqual(post['rebuild'][self._image_ref_parameter],
found_server.get('image')['id'])
self._verify_access_ips(found_server)
# Cleanup
self._delete_server(created_server_id)
def test_rename_server(self):
# Test building and renaming a server.
fake_network.set_stub_network_methods(self.stubs)
# Create a server
server = self._build_minimal_create_server_request()
created_server = self.api.post_server({'server': server})
LOG.debug("created_server: %s" % created_server)
server_id = created_server['id']
self.assertTrue(server_id)
# Rename the server to 'new-name'
self.api.put_server(server_id, {'server': {'name': 'new-name'}})
# Check the name of the server
created_server = self.api.get_server(server_id)
self.assertEqual(created_server['name'], 'new-name')
# Cleanup
self._delete_server(server_id)
def test_create_multiple_servers(self):
# Creates multiple servers and checks for reservation_id.
# Create 2 servers, setting 'return_reservation_id, which should
# return a reservation_id
server = self._build_minimal_create_server_request()
server[self._min_count_parameter] = 2
server[self._return_resv_id_parameter] = True
post = {'server': server}
response = self.api.post_server(post)
self.assertIn('reservation_id', response)
reservation_id = response['reservation_id']
self.assertNotIn(reservation_id, ['', None])
# Create 1 more server, which should not return a reservation_id
server = self._build_minimal_create_server_request()
post = {'server': server}
created_server = self.api.post_server(post)
self.assertTrue(created_server['id'])
created_server_id = created_server['id']
# lookup servers created by the first request.
servers = self.api.get_servers(detail=True,
search_opts={'reservation_id': reservation_id})
server_map = {server['id']: server for server in servers}
found_server = server_map.get(created_server_id)
# The server from the 2nd request should not be there.
self.assertIsNone(found_server)
# Should have found 2 servers.
self.assertEqual(len(server_map), 2)
# Cleanup
self._delete_server(created_server_id)
for server_id in server_map:
self._delete_server(server_id)
def test_create_server_with_injected_files(self):
# Creates a server with injected_files.
fake_network.set_stub_network_methods(self.stubs)
personality = []
# Inject a text file
data = 'Hello, World!'
personality.append({
'path': '/helloworld.txt',
'contents': data.encode('base64'),
})
# Inject a binary file
data = zlib.compress('Hello, World!')
personality.append({
'path': '/helloworld.zip',
'contents': data.encode('base64'),
})
# Create server
server = self._build_minimal_create_server_request()
server['personality'] = personality
post = {'server': server}
created_server = self.api.post_server(post)
LOG.debug("created_server: %s" % created_server)
self.assertTrue(created_server['id'])
created_server_id = created_server['id']
# Check it's there
found_server = self.api.get_server(created_server_id)
self.assertEqual(created_server_id, found_server['id'])
found_server = self._wait_for_state_change(found_server, 'BUILD')
self.assertEqual('ACTIVE', found_server['status'])
# Cleanup
self._delete_server(created_server_id)
class ServersTestV3(client.TestOpenStackClientV3Mixin, ServersTest):
_api_version = 'v3'
| apache-2.0 | -1,839,595,261,204,028,000 | 4,226,043,321,169,321,000 | 36.746589 | 79 | 0.624871 | false |
oaubert/advene | lib/advene/plugins/tts.py | 1 | 14814 | #
# Advene: Annotate Digital Videos, Exchange on the NEt
# Copyright (C) 2008-2017 Olivier Aubert <[email protected]>
#
# Advene is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Advene is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Advene; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
import logging
logger = logging.getLogger(__name__)
from gettext import gettext as _
import subprocess
import os
import signal
import advene.core.config as config
from advene.rules.elements import RegisteredAction
import advene.util.helper as helper
import advene.model.tal.context
CREATE_NO_WINDOW = 0x8000000
name="Text-To-Speech actions"
ENGINES={}
def subprocess_setup():
# Python installs a SIGPIPE handler by default. This is usually not what
# non-Python subprocesses expect.
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
# Registering decorator
def ttsengine(name):
def inside_register(f):
ENGINES[name] = f
return f
return inside_register
def register(controller=None):
engine_name = config.data.preferences.get('tts-engine', 'auto')
selected = None
if engine_name == 'auto':
# Automatic configuration. Order is important.
for name in ('customarg', 'custom', 'espeak', 'macosx', 'festival', 'sapi', 'generic'):
c = ENGINES[name]
if c.can_run():
logger.info("TTS: Automatically using " + c.__doc__.splitlines()[0])
selected = c
break
else:
c = ENGINES.get(engine_name)
if c is None:
logger.warning("TTS: %s was specified but it does not exist. Using generic fallback. Please check your configuration." % c.__doc__.splitlines()[0])
selected = ENGINES['generic']
elif c.can_run():
logger.warning("TTS: Using %s as specified." % c.__doc__.splitlines()[0])
selected = c
else:
logger.warning("TTS: Using %s as specified, but it apparently cannot run. Please check your configuration." % c.__doc__.splitlines()[0])
selected = c
engine = selected(controller)
controller.register_action(RegisteredAction(
name="Pronounce",
method=engine.action_pronounce,
description=_("Pronounce a text"),
parameters={'message': _("String to pronounce.")},
defaults={'message': 'annotation/content/data'},
predefined={'message': (
( 'annotation/content/data', _("The annotation content") ),
)},
category='sound',
))
@ttsengine('generic')
class TTSEngine:
"""Generic TTSEngine.
"""
def __init__(self, controller=None):
self.controller=controller
self.gui=self.controller.gui
self.language=None
@staticmethod
def can_run():
"""Can this engine run ?
"""
return True
def parse_parameter(self, context, parameters, name, default_value):
"""Helper method used in actions.
"""
if name in parameters:
try:
result=context.evaluateValue(parameters[name])
except advene.model.tal.context.AdveneTalesException as e:
try:
rulename=context.evaluateValue('rule')
except advene.model.tal.context.AdveneTalesException:
rulename=_("Unknown rule")
logger.error(_("Rule %(rulename)s: Error in the evaluation of the parameter %(parametername)s:") % {'rulename': rulename,
'parametername': name})
logger.error(str(e)[:160])
result=default_value
else:
result=default_value
return result
def set_language(self, language):
self.language=language
def pronounce(self, sentence):
"""Engine-specific method.
"""
logger.debug("TTS: pronounce " + sentence)
return True
def action_pronounce (self, context, parameters):
"""Pronounce action.
"""
message=self.parse_parameter(context, parameters, 'message', _("No message..."))
self.pronounce(message)
return True
@ttsengine('festival')
class FestivalTTSEngine(TTSEngine):
"""Festival TTSEngine.
Note: If it is not the case (depends on the version), festival
must be configured to play audio through the ALSA subsystem, in
order to be able to mix it with the movie sound if necessary.
For this, in older Festival versions (at least until 1.4.3), the
~/.festivalrc file should contain:
(Parameter.set 'Audio_Command "aplay -q -c 1 -t raw -f s16 -r $SR $FILE")
(Parameter.set 'Audio_Method 'Audio_Command)
"""
def __init__(self, controller=None):
TTSEngine.__init__(self, controller=controller)
self.festival_path=helper.find_in_path('festival')
self.aplay_path=helper.find_in_path('aplay')
if self.festival_path is None:
logger.warning(_("TTS disabled. Cannot find the application 'festival' in PATH"))
if self.aplay_path is None:
logger.warning(_("TTS disabled. Cannot find the application 'aplay' in PATH"))
self.festival_process=None
def init(self):
if self.festival_path is not None and self.aplay_path is not None:
if config.data.os == 'win32':
import win32process
kw = { 'creationflags': win32process.CREATE_NO_WINDOW }
else:
kw = { 'preexec_fn': subprocess_setup }
self.festival_process = subprocess.Popen([ self.festival_path, '--pipe' ], stdin=subprocess.PIPE, **kw)
# Configure festival to use aplay
self.festival_process.stdin.write("""(Parameter.set 'Audio_Command "%s -q -c 1 -t raw -f s16 -r $SR $FILE")\n""" % self.aplay_path)
self.festival_process.stdin.write("""(Parameter.set 'Audio_Method 'Audio_Command)\n""")
@staticmethod
def can_run():
"""Can this engine run ?
"""
return helper.find_in_path('festival') is not None
def pronounce (self, sentence):
try:
self.init()
if self.festival_process is not None:
self.festival_process.stdin.write('(SayText "%s")\n' % helper.unaccent(sentence))
except OSError as e:
logger.error("TTS Error: " + str(e.message))
return True
@ttsengine('macosx')
class MacOSXTTSEngine(TTSEngine):
"""MacOSX TTSEngine.
"""
@staticmethod
def can_run():
"""Can this engine run ?
"""
return config.data.os == 'darwin'
def pronounce (self, sentence):
subprocess.call( [ '/usr/bin/say', sentence.encode(config.data.preferences['tts-encoding'], 'ignore') ] )
return True
"""
Win32: install pytts + pywin32 (from sf.net) + mfc71.dll + spchapi.exe (from www.microsoft.com/reader/developer/downloads/tts.mspx
)
On some flavors of Windows you can use:
import pyTTS
tts = pyTTS.Create()
tts.Speak('This is the sound of my voice.')
On Mac OS X you can use:
import os
http://farm.tucows.com/blog/_archives/2005/1/19/266813.html
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/114216
http://www.daniweb.com/code/snippet326.html
http://www.mindtrove.info/articles/pytts.html
"""
@ttsengine('espeak')
class EspeakTTSEngine(TTSEngine):
"""Espeak TTSEngine.
"""
def __init__(self, controller=None):
TTSEngine.__init__(self, controller=controller)
self.language=None
self.espeak_path=helper.find_in_path('espeak')
if self.espeak_path is None and config.data.os == 'win32':
# Try c:\\Program Files\\eSpeak
if os.path.isdir('c:\\Program Files\\eSpeak'):
self.espeak_path='c:\\Program Files\\eSpeak\\command_line\\espeak.exe'
elif os.path.isdir('C:\\Program Files (x86)\\eSpeak'):
#winXp 64b
self.espeak_path='C:\\Program Files (x86)\\eSpeak\\command_line\\espeak.exe'
self.espeak_process=None
@staticmethod
def can_run():
"""Can this engine run ?
"""
return (os.path.isdir('c:\\Program Files\\eSpeak')
or os.path.isdir('C:\\Program Files (x86)\\eSpeak')
or helper.find_in_path('espeak') is not None)
def close(self):
"""Close the espeak process.
"""
if self.espeak_process is not None:
if config.data.os == 'win32':
import win32api
win32api.TerminateProcess(int(self.espeak_process._handle), -1)
else:
os.kill(self.espeak_process.pid, signal.SIGTERM)
self.espeak_process.wait()
self.espeak_process=None
def pronounce (self, sentence):
lang=config.data.preferences.get('tts-language', 'en')
if self.language != lang:
# Need to restart espeak to use the new language
self.close()
self.language=lang
try:
if self.espeak_process is None:
if config.data.os == 'win32':
import win32process
kw = { 'creationflags': win32process.CREATE_NO_WINDOW }
else:
kw = { 'preexec_fn': subprocess_setup }
self.espeak_process = subprocess.Popen([ self.espeak_path, '-v', self.language ], stdin=subprocess.PIPE, stdout=subprocess.PIPE, **kw)
self.espeak_process.stdin.write((sentence + "\n").encode(config.data.preferences['tts-encoding'], 'ignore'))
except OSError as e:
logger.error("TTS Error: %s", str(e.message))
return True
@ttsengine('sapi')
class SAPITTSEngine(TTSEngine):
"""SAPI (win32) TTSEngine.
"""
# SAPI constants (from http://msdn.microsoft.com/en-us/library/aa914305.aspx):
SPF_ASYNC = (1 << 0)
SPF_PURGEBEFORESPEAK = (1 << 1)
def __init__(self, controller=None):
TTSEngine.__init__(self, controller=controller)
self.sapi=None
@staticmethod
def can_run():
"""Can this engine run ?
"""
try:
import win32com.client
voice = win32com.client.Dispatch("sapi.SPVoice")
except:
voice = None
return voice
def pronounce (self, sentence):
if self.sapi is None:
import win32com.client
self.sapi=win32com.client.Dispatch("sapi.SPVoice")
self.sapi.Speak( sentence.encode(config.data.preferences['tts-encoding'], 'ignore'), self.SPF_ASYNC | self.SPF_PURGEBEFORESPEAK )
return True
@ttsengine('custom')
class CustomTTSEngine(TTSEngine):
"""Custom TTSEngine.
It tries to run a 'prononce' ('prononce.bat' on win32) script,
which takes strings on its stdin and pronounces them.
"""
if config.data.os == 'win32':
prgname='prononce.bat'
else:
prgname='prononce'
def __init__(self, controller=None):
TTSEngine.__init__(self, controller=controller)
self.language=None
self.prg_path=helper.find_in_path(CustomTTSEngine.prgname)
self.prg_process=None
@staticmethod
def can_run():
"""Can this engine run ?
"""
return helper.find_in_path(CustomTTSEngine.prgname) is not None
def close(self):
"""Close the process.
"""
if self.prg_process is not None:
if config.data.os == 'win32':
import win32api
win32api.TerminateProcess(int(self.prg_process._handle), -1)
else:
os.kill(self.prg_process.pid, signal.SIGTERM)
self.prg_process.wait()
self.prg_process=None
def pronounce (self, sentence):
lang=config.data.preferences.get('tts-language', 'en')
if self.language != lang:
self.close()
self.language=lang
try:
if config.data.os == 'win32':
import win32process
kw = { 'creationflags': win32process.CREATE_NO_WINDOW }
else:
kw = { 'preexec_fn': subprocess_setup }
if self.prg_process is None:
self.prg_process = subprocess.Popen([ self.prg_path, '-v', self.language ], stdin=subprocess.PIPE, stdout=subprocess.PIPE, **kw)
self.prg_process.stdin.write((sentence + "\n").encode(config.data.preferences['tts-encoding'], 'ignore'))
except OSError as e:
logger.error("TTS Error: %s", str(e.message))
return True
@ttsengine('customarg')
class CustomArgTTSEngine(TTSEngine):
"""CustomArg TTSEngine.
It tries to run a 'prononcearg' ('prononcearg.bat' on win32) script,
which takes strings as arguments and pronounces them.
"""
if config.data.os == 'win32':
prgname='prononcearg.bat'
else:
prgname='prononcearg'
def __init__(self, controller=None):
TTSEngine.__init__(self, controller=controller)
self.language=None
self.prg_path=helper.find_in_path(CustomArgTTSEngine.prgname)
@staticmethod
def can_run():
"""Can this engine run ?
"""
return helper.find_in_path(CustomArgTTSEngine.prgname) is not None
def close(self):
"""Close the process.
"""
pass
def pronounce (self, sentence):
lang=config.data.preferences.get('tts-language', 'en')
if self.language != lang:
self.close()
self.language=lang
try:
if config.data.os == 'win32':
import win32process
kw = { 'creationflags': win32process.CREATE_NO_WINDOW }
else:
kw = { 'preexec_fn': subprocess_setup }
subprocess.Popen(str(" ".join([self.prg_path, '-v', self.language, '"%s"' % (sentence.replace('\n',' ').replace('"', '') + "\n")])).encode(config.data.preferences['tts-encoding'], 'ignore'), **kw)
except OSError as e:
            try:
                logger.error("TTS Error: %s", str(e.message))
            except UnicodeDecodeError:
                logger.error("TTS: Error decoding error message with standard encoding")
return True
| gpl-2.0 | -2,986,952,003,950,942,000 | -1,884,326,721,295,272,000 | 35.220049 | 208 | 0.601256 | false |
d9pouces/StarterPyth | starterpyth/model.py | 1 | 13326 | # -*- coding=utf-8 -*-
import datetime
import os
import shutil
import subprocess
from jinja2 import ChoiceLoader
import pkg_resources
from six import u
from starterpyth.cliforms import BaseForm
from starterpyth.utils import binary_path, walk
from starterpyth.log import display, GREEN, CYAN, RED
from starterpyth.translation import ugettext as _
__author__ = 'flanker'
class Model(object):
name = None
template_includes = [('starterpyth', 'templates/includes')]
include_suffix = '_inc'
template_suffix = '_tpl'
class ExtraForm(BaseForm):
pass
def __init__(self, base_context):
"""
        :param base_context: dictionary with the following keys:
string values
* project_name: explicit name of the project ( [a-zA-Z_\-]\w* )
* module_name: Python base module ( [a-z][\-_a-z0-9]* )
some boolean values:
* use_py2, use_py3: use Python 2 or Python 3
* use_py26, use_py27, use_py30, use_py31, use_py32, use_py33, use_py34, use_py35
* use_six, use_2to3: use six or 2to3 for Python 2&3 compatibility
"""
self.global_context = base_context
self.file_context = None
@property
def template_roots(self):
result = []
return result
def run(self, interactive=True):
project_root = self.global_context['project_root']
if os.path.exists(project_root):
if self.global_context['overwrite']:
if os.path.isdir(project_root):
shutil.rmtree(project_root)
else:
os.remove(project_root)
else:
display(_('Destination path already exists!'), color=RED, bold=True)
return
context = self.get_context()
self.global_context.update(context)
extra_form = self.get_extraform(interactive=interactive)
self.global_context.update(extra_form)
extra_context = self.get_extracontext()
self.global_context.update(extra_context)
filters = self.get_template_filters()
self.set_virtualenvs()
for modname, dirname in self.template_roots:
display('dirname %s' % dirname, color=CYAN)
env = self.get_environment(modname, dirname, filters)
self.write_files(modname, dirname, env)
def set_virtualenvs(self):
virtualenv_path = None
virtualenv_version = None
for k in ('26', '27', '30', '31', '32', '33', '34', '35'):
v = '%s.%s' % (k[0], k[1])
if self.global_context['create_venv%s' % k]:
if self.global_context['virtualenv_present']:
virtualenv_path = ('~/.virtualenvs/%s%s' % (self.global_context['module_name'], k))
python_path = binary_path('python%s' % v)
subprocess.check_call(['virtualenv', os.path.expanduser(virtualenv_path), '-p', python_path])
cmd_list = [os.path.join(os.path.expanduser(virtualenv_path), 'bin', 'python'), '--version']
p = subprocess.Popen(cmd_list, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
content = p.communicate()
if content[0]: # Python 3 prints version on stdout
# noinspection PyUnresolvedReferences
virtualenv_version = content[0].decode('utf-8').strip()
else: # Python 2 prints version on stderr
# noinspection PyUnresolvedReferences
virtualenv_version = content[1].decode('utf-8').strip()
self.global_context['virtualenv'] = (virtualenv_path, virtualenv_version)
# noinspection PyMethodMayBeStatic
def get_context(self):
values = {'encoding': 'utf-8', 'entry_points': {}, 'cmdclass': {}, 'ext_modules': [],
'install_requires': [], 'setup_requires': [], 'classifiers': []}
if self.global_context['use_six']:
values['install_requires'] += ['six', 'setuptools>=1.0', ]
values['setup_requires'] += ['six', 'setuptools>=1.0', ]
license_fd = pkg_resources.resource_stream('starterpyth',
'data/licenses/%s.txt' % self.global_context['license'])
values['license_content'] = license_fd.read().decode('utf-8')
values['copyright'] = u('%d, %s') % (datetime.date.today().year, self.global_context['author'])
self.global_context['used_python_versions'] = []
values['classifiers'] += ['Development Status :: 3 - Alpha',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: BSD',
'Operating System :: POSIX :: Linux',
'Operating System :: Unix',
]
lic = {'CeCILL-A': 'License :: OSI Approved :: CEA CNRS Inria Logiciel Libre License, version 2.1 (CeCILL-2.1)',
'CeCILL-B': 'License :: OSI Approved :: CEA CNRS Inria Logiciel Libre License, version 2.1 (CeCILL-2.1)',
'BSD-2-clauses': 'License :: OSI Approved :: BSD License',
'Apache-2': 'License :: OSI Approved :: Apache Software License',
'CeCILL-C': 'License :: OSI Approved :: CEA CNRS Inria Logiciel Libre License, version 2.1 (CeCILL-2.1)',
'GPL-2': 'License :: OSI Approved :: GNU General Public License v2 (GPLv2)',
'GPL-3': 'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'LGPL-2': 'License :: OSI Approved :: GNU Lesser General Public License v2 (LGPLv2)',
'LGPL-3': 'License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)',
'MIT': 'License :: OSI Approved :: MIT License',
'APSL': 'License :: OSI Approved :: Apple Public Source License',
'PSFL': 'License :: OSI Approved :: Python Software Foundation License',
}
values['classifiers'] += [lic[self.global_context['license']]]
for k in ('26', '27', '30', '31', '32', '33', '34', '35'):
v = '%s.%s' % (k[0], k[1])
if self.global_context['use_py%s' % k]:
values['classifiers'] += ['Programming Language :: Python :: %s' % v]
self.global_context['used_python_versions'].append(v)
if not self.global_context['use_py2']:
values['classifiers'] += ['Programming Language :: Python :: 3 :: Only']
elif not self.global_context['use_py3']:
values['classifiers'] += ['Programming Language :: Python :: 2 :: Only']
values['tox_used_python_versions'] = [('py' + x[0] + x[-1]) for x in
self.global_context['used_python_versions']]
return values
# noinspection PyMethodMayBeStatic
def get_extracontext(self):
return {}
def get_extraform(self, interactive=True):
form = self.ExtraForm(extra_env=self.global_context)
values = form.read(interactive=interactive)
return values
# noinspection PyMethodMayBeStatic,PyUnusedLocal
def process_directory_or_file(self, src_path, dst_path, name, is_directory):
"""
:param src_path: source path, relative to python module
:param dst_path: absolute destination path
:param name: basename of the file or directory to be processed
:return:
"""
if name in ['.svn', '.git', '.hg', 'CVS'] or name[-len(self.include_suffix):] == self.include_suffix:
return False
return True
# noinspection PyMethodMayBeStatic
def get_environment(self, modname, dirname, filters):
"""
Return a valid Jinja2 environment (with filters)
:param modname:
:param dirname:
        :param filters: dictionary of extra filters for jinja2
:return:
"""
from jinja2 import Environment, PackageLoader
loaders = [PackageLoader(modname, dirname)]
for modname, dirname in self.template_includes:
loaders.append(PackageLoader(modname, dirname))
loader = ChoiceLoader(loaders)
env = Environment(loader=loader)
env.filters.update(filters)
return env
def write_files(self, modname, dirname, env):
"""
        Write all templated or raw files to the new project. All templates are rendered twice.
        This behaviour makes it possible to determine which functions must be imported at the beginning of Python files.
:param modname: module containing template files
:param dirname: dirname containing template files in the module `modname`
:param env: Jinja2 environment
:return:
"""
from jinja2 import Template
project_root = self.global_context['project_root']
# creation of the project directory if needed
if not os.path.isdir(project_root):
os.makedirs(project_root)
display(_('Directory %(f)s created.') % {'f': project_root}, color=GREEN)
# noinspection PyTypeChecker
prefix_len = len(dirname) + 1
def get_path(root_, name):
"""return relative source path (to template dir) and absolute destination path"""
src_path_ = (root_ + '/' + name)[prefix_len:]
dst_path_ = src_path_
if os.sep != '/':
dst_path_ = dst_path_.replace('/', os.sep)
if dst_path_.find('{') > -1: # the name of the file is templated
dst_path_ = Template(dst_path_).render(**self.global_context)
if dst_path_[-len(self.template_suffix):] == self.template_suffix:
dst_path_ = dst_path_[:-len(self.template_suffix)]
return src_path_, os.path.join(project_root, dst_path_)
# walk through all files (raw and templates) in modname/dirname and write them to destination
for root, dirnames, filenames in walk(modname, dirname):
for dirname in dirnames:
src_path, dst_path = get_path(root, dirname)
if not self.process_directory_or_file(src_path, dst_path, dirname, True):
continue
if not os.path.isdir(dst_path):
os.makedirs(dst_path)
display(_('Directory %(f)s created.') % {'f': dst_path}, color=GREEN)
for filename in filenames:
src_path, dst_path = get_path(root, filename)
if not self.process_directory_or_file(src_path, dst_path, filename, False):
continue
if not os.path.isdir(os.path.dirname(dst_path)):
continue
if filename[-len(self.template_suffix):] == self.template_suffix:
self.file_context = {'render_pass': 1}
template = env.get_template(src_path)
f_out = open(dst_path, 'wb')
self.file_context.update(self.global_context)
template.render(**self.file_context)
self.file_context['render_pass'] = 2
template_content = template.render(**self.file_context).encode('utf-8')
f_out.write(template_content)
f_out.close()
display(_('Template %(f)s written.') % {'f': dst_path}, color=GREEN)
else:
f_out = open(dst_path, 'wb')
f_in = pkg_resources.resource_stream(modname, root + '/' + filename)
data = f_in.read(10240)
while data:
f_out.write(data)
data = f_in.read(10240)
f_in.close()
f_out.close()
display(_('File %(f)s written.') % {'f': dst_path}, color=GREEN)
def increment(self, key):
self.file_context[key] = self.file_context.get(key, 0) + 1
def text(self, value):
return self.raw_text(value)
def raw_text(self, value):
if '\n' in value:
prefix = '"""'
elif "'" not in value:
prefix = "'"
elif '"' not in value:
prefix = '"'
else:
value = value.replace("'", "\\'")
prefix = "'"
self.increment('counter_unicode_literals')
return '%s%s%s' % (prefix, value, prefix)
def docstring(self, value):
self.increment('counter_unicode_literals')
return '"""%s"""' % value
def translate(self, value):
if not self.global_context['use_i18n']:
return self.text(value)
self.increment('counter_i18n')
return "_(%s)" % self.text(value)
def binary(self, value):
return 'b' + self.raw_text(value)
def get_template_filters(self):
return {'text': self.text, 'binary': self.binary, 'repr': lambda x: repr(x), 'translate': self.translate,
'docstring': self.docstring, 'raw_text': self.raw_text}
if __name__ == '__main__':
import doctest
    doctest.testmod()
| gpl-2.0 | 7,746,954,114,516,768,000 | -6,578,938,572,040,042,000 | 44.640411 | 120 | 0.55523 | false |
dh4nav/lammps | tools/python/pizza/dump.py | 21 | 40376 | # Pizza.py toolkit, www.cs.sandia.gov/~sjplimp/pizza.html
# Steve Plimpton, [email protected], Sandia National Laboratories
#
# Copyright (2005) Sandia Corporation. Under the terms of Contract
# DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
# certain rights in this software. This software is distributed under
# the GNU General Public License.
# dump tool
oneline = "Read, write, manipulate dump files and particle attributes"
docstr = """
d = dump("dump.one") read in one or more dump files
d = dump("dump.1 dump.2.gz") can be gzipped
d = dump("dump.*") wildcard expands to multiple files
d = dump("dump.*",0) two args = store filenames, but don't read
incomplete and duplicate snapshots are deleted
if atoms have 5 or 8 columns, assign id,type,x,y,z (ix,iy,iz)
atoms will be unscaled if stored in files as scaled
time = d.next() read next snapshot from dump files
used with 2-argument constructor to allow reading snapshots one-at-a-time
snapshot will be skipped only if another snapshot has same time stamp
return time stamp of snapshot read
return -1 if no snapshots left or last snapshot is incomplete
no column name assignment or unscaling is performed
d.map(1,"id",3,"x") assign names to atom columns (1-N)
not needed if dump file is self-describing
d.tselect.all() select all timesteps
d.tselect.one(N) select only timestep N
d.tselect.none() deselect all timesteps
d.tselect.skip(M) select every Mth step
d.tselect.test("$t >= 100 and $t < 10000") select matching timesteps
d.delete() delete non-selected timesteps
selecting a timestep also selects all atoms in the timestep
skip() and test() only select from currently selected timesteps
test() uses a Python Boolean expression with $t for timestep value
Python comparison syntax: == != < > <= >= and or
d.aselect.all() select all atoms in all steps
d.aselect.all(N) select all atoms in one step
d.aselect.test("$id > 100 and $type == 2") select match atoms in all steps
d.aselect.test("$id > 100 and $type == 2",N) select matching atoms in one step
all() with no args selects atoms from currently selected timesteps
test() with one arg selects atoms from currently selected timesteps
test() sub-selects from currently selected atoms
test() uses a Python Boolean expression with $ for atom attributes
Python comparison syntax: == != < > <= >= and or
$name must end with a space
d.write("file") write selected steps/atoms to dump file
d.write("file",head,app) write selected steps/atoms to dump file
d.scatter("tmp") write selected steps/atoms to multiple files
write() can be specified with 2 additional flags
  head = 0/1 for no/yes snapshot header, app = 0/1 for write vs append
scatter() files are given timestep suffix: e.g. tmp.0, tmp.100, etc
d.scale() scale x,y,z to 0-1 for all timesteps
d.scale(100) scale atom coords for timestep N
d.unscale() unscale x,y,z to box size to all timesteps
d.unscale(1000) unscale atom coords for timestep N
d.wrap() wrap x,y,z into periodic box via ix,iy,iz
d.unwrap() unwrap x,y,z out of box via ix,iy,iz
d.owrap("other") wrap x,y,z to same image as another atom
d.sort() sort atoms by atom ID in all selected steps
d.sort("x") sort atoms by column value in all steps
d.sort(1000) sort atoms in timestep N
scale(), unscale(), wrap(), unwrap(), owrap() operate on all steps and atoms
wrap(), unwrap(), owrap() require ix,iy,iz be defined
owrap() requires a column be defined which contains an atom ID
name of that column is the argument to owrap()
x,y,z for each atom is wrapped to same image as the associated atom ID
  useful for wrapping all of a molecule's atoms the same way so the molecule stays contiguous
m1,m2 = d.minmax("type") find min/max values for a column
d.set("$ke = $vx * $vx + $vy * $vy") set a column to a computed value
d.setv("type",vector) set a column to a vector of values
d.spread("ke",N,"color") 2nd col = N ints spread over 1st col
d.clone(1000,"color") clone timestep N values to other steps
minmax() operates on selected timesteps and atoms
set() operates on selected timesteps and atoms
left hand side column is created if necessary
left-hand side column is unset or unchanged for non-selected atoms
equation is in Python syntax
use $ for column names, $name must end with a space
setv() operates on selected timesteps and atoms
if column label does not exist, column is created
values in vector are assigned sequentially to atoms, so may want to sort()
length of vector must match # of selected atoms
spread() operates on selected timesteps and atoms
min and max are found for 1st specified column across all selected atoms
atom's value is linear mapping (1-N) between min and max
that is stored in 2nd column (created if needed)
useful for creating a color map
clone() operates on selected timesteps and atoms
values at every timestep are set to value at timestep N for that atom ID
useful for propagating a color map
t = d.time() return vector of selected timestep values
fx,fy,... = d.atom(100,"fx","fy",...) return vector(s) for atom ID N
fx,fy,... = d.vecs(1000,"fx","fy",...) return vector(s) for timestep N
atom() returns vectors with one value for each selected timestep
vecs() returns vectors with one value for each selected atom in the timestep
index,time,flag = d.iterator(0/1) loop over dump snapshots
time,box,atoms,bonds,tris = d.viz(index) return list of viz objects
d.atype = "color" set column returned as "type" by viz
d.extra("dump.bond") read bond list from dump file
d.extra(data) extract bond/tri/line list from data
iterator() loops over selected timesteps
iterator() called with arg = 0 first time, with arg = 1 on subsequent calls
index = index within dump object (0 to # of snapshots)
time = timestep value
flag = -1 when iteration is done, 1 otherwise
viz() returns info for selected atoms for specified timestep index
time = timestep value
box = [xlo,ylo,zlo,xhi,yhi,zhi]
atoms = id,type,x,y,z for each atom as 2d array
bonds = id,type,x1,y1,z1,x2,y2,z2,t1,t2 for each bond as 2d array
if bonds() was used to define bonds, else empty list
tris = id,type,x1,y1,z1,x2,y2,z2,x3,y3,z3,nx,ny,nz for each tri as 2d array
if extra() was used to define tris, else empty list
lines = id,type,x1,y1,z1,x2,y2,z2 for each line as 2d array
if extra() was used to define lines, else empty list
atype is column name viz() will return as atom type (def = "type")
extra() stores list of bonds/tris/lines to return each time viz() is called
"""
# History
# 8/05, Steve Plimpton (SNL): original version
# 12/09, David Hart (SNL): allow use of NumPy or Numeric
# ToDo list
# try to optimize this line in read_snap: words += f.readline().split()
# allow $name in aselect.test() and set() to end with non-space
# should next() snapshot be auto-unscaled ?
# Variables
# flist = list of dump file names
# increment = 1 if reading snapshots one-at-a-time
# nextfile = which file to read from via next()
# eof = ptr into current file for where to read via next()
# nsnaps = # of snapshots
# nselect = # of selected snapshots
# snaps = list of snapshots
# names = dictionary of column names:
# key = "id", value = column # (0 to M-1)
# tselect = class for time selection
# aselect = class for atom selection
# atype = name of vector used as atom type by viz extract
# bondflag = 0 if no bonds, 1 if they are defined statically
# bondlist = static list of bonds to viz() return for all snapshots
# only a list of atom pairs, coords have to be created for each snapshot
# triflag = 0 if no tris, 1 if they are defined statically, 2 if dynamic
# trilist = static list of tris to return via viz() for all snapshots
# lineflag = 0 if no lines, 1 if they are defined statically
# linelist = static list of lines to return via viz() for all snapshots
# Snap = one snapshot
# time = time stamp
# tselect = 0/1 if this snapshot selected
# natoms = # of atoms
# nselect = # of selected atoms in this snapshot
# aselect[i] = 0/1 for each atom
# xlo,xhi,ylo,yhi,zlo,zhi = box bounds (float)
# atoms[i][j] = 2d array of floats, i = 0 to natoms-1, j = 0 to ncols-1
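# For example, after d = dump("dump.one") (an illustrative filename),
# d.snaps[0].atoms[i][d.names["x"]] is the x coordinate of atom i in the
# first stored snapshot.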
# Imports and external programs
import sys, commands, re, glob, types
from os import popen
from math import * # any function could be used by set()
try:
import numpy as np
oldnumeric = False
except:
import Numeric as np
oldnumeric = True
try: from DEFAULTS import PIZZA_GUNZIP
except: PIZZA_GUNZIP = "gunzip"
# Class definition
class dump:
# --------------------------------------------------------------------
def __init__(self,*list):
self.snaps = []
self.nsnaps = self.nselect = 0
self.names = {}
self.tselect = tselect(self)
self.aselect = aselect(self)
self.atype = "type"
self.bondflag = 0
self.bondlist = []
self.triflag = 0
self.trilist = []
self.triobj = 0
self.lineflag = 0
self.linelist = []
# flist = list of all dump file names
words = list[0].split()
self.flist = []
for word in words: self.flist += glob.glob(word)
if len(self.flist) == 0 and len(list) == 1:
raise StandardError,"no dump file specified"
if len(list) == 1:
self.increment = 0
self.read_all()
else:
self.increment = 1
self.nextfile = 0
self.eof = 0
# --------------------------------------------------------------------
def read_all(self):
# read all snapshots from each file
# test for gzipped files
for file in self.flist:
if file[-3:] == ".gz":
f = popen("%s -c %s" % (PIZZA_GUNZIP,file),'r')
else: f = open(file)
snap = self.read_snapshot(f)
while snap:
self.snaps.append(snap)
print snap.time,
sys.stdout.flush()
snap = self.read_snapshot(f)
f.close()
print
# sort entries by timestep, cull duplicates
self.snaps.sort(self.compare_time)
self.cull()
self.nsnaps = len(self.snaps)
print "read %d snapshots" % self.nsnaps
# select all timesteps and atoms
self.tselect.all()
# set default names for atom columns if file wasn't self-describing
if len(self.snaps) == 0:
print "no column assignments made"
elif len(self.names):
print "assigned columns:",self.names2str()
    elif self.snaps[0].atoms is None:
print "no column assignments made"
elif len(self.snaps[0].atoms[0]) == 5:
self.map(1,"id",2,"type",3,"x",4,"y",5,"z")
print "assigned columns:",self.names2str()
elif len(self.snaps[0].atoms[0]) == 8:
self.map(1,"id",2,"type",3,"x",4,"y",5,"z",6,"ix",7,"iy",8,"iz")
print "assigned columns:",self.names2str()
else:
print "no column assignments made"
# if snapshots are scaled, unscale them
if (not self.names.has_key("x")) or \
(not self.names.has_key("y")) or \
(not self.names.has_key("z")):
print "no unscaling could be performed"
elif self.nsnaps > 0:
if self.scaled(self.nsnaps-1): self.unscale()
else: print "dump is already unscaled"
# --------------------------------------------------------------------
# read next snapshot from list of files
def next(self):
if not self.increment: raise StandardError,"cannot read incrementally"
# read next snapshot in current file using eof as pointer
# if fail, try next file
# if new snapshot time stamp already exists, read next snapshot
while 1:
f = open(self.flist[self.nextfile],'rb')
f.seek(self.eof)
snap = self.read_snapshot(f)
if not snap:
self.nextfile += 1
if self.nextfile == len(self.flist): return -1
f.close()
self.eof = 0
continue
self.eof = f.tell()
f.close()
try:
self.findtime(snap.time)
continue
except: break
# select the new snapshot with all its atoms
self.snaps.append(snap)
snap = self.snaps[self.nsnaps]
snap.tselect = 1
snap.nselect = snap.natoms
for i in xrange(snap.natoms): snap.aselect[i] = 1
self.nsnaps += 1
self.nselect += 1
return snap.time
# --------------------------------------------------------------------
# read a single snapshot from file f
# return snapshot or 0 if failed
# assign column names if not already done and file is self-describing
# convert xs,xu to x
def read_snapshot(self,f):
try:
snap = Snap()
item = f.readline()
snap.time = int(f.readline().split()[0]) # just grab 1st field
item = f.readline()
snap.natoms = int(f.readline())
snap.aselect = np.zeros(snap.natoms)
item = f.readline()
words = f.readline().split()
snap.xlo,snap.xhi = float(words[0]),float(words[1])
words = f.readline().split()
snap.ylo,snap.yhi = float(words[0]),float(words[1])
words = f.readline().split()
snap.zlo,snap.zhi = float(words[0]),float(words[1])
item = f.readline()
if len(self.names) == 0:
words = item.split()[2:]
if len(words):
for i in range(len(words)):
if words[i] == "xs" or words[i] == "xu":
self.names["x"] = i
elif words[i] == "ys" or words[i] == "yu":
self.names["y"] = i
elif words[i] == "zs" or words[i] == "zu":
self.names["z"] = i
else: self.names[words[i]] = i
if snap.natoms:
words = f.readline().split()
ncol = len(words)
for i in xrange(1,snap.natoms):
words += f.readline().split()
floats = map(float,words)
if oldnumeric: atoms = np.zeros((snap.natoms,ncol),np.Float)
else: atoms = np.zeros((snap.natoms,ncol),np.float)
start = 0
stop = ncol
for i in xrange(snap.natoms):
atoms[i] = floats[start:stop]
start = stop
stop += ncol
else: atoms = None
snap.atoms = atoms
return snap
except:
return 0
# --------------------------------------------------------------------
# decide if snapshot i is scaled/unscaled from coords of first and last atom
def scaled(self,i):
ix = self.names["x"]
iy = self.names["y"]
iz = self.names["z"]
natoms = self.snaps[i].natoms
if natoms == 0: return 0
x1 = self.snaps[i].atoms[0][ix]
y1 = self.snaps[i].atoms[0][iy]
z1 = self.snaps[i].atoms[0][iz]
x2 = self.snaps[i].atoms[natoms-1][ix]
y2 = self.snaps[i].atoms[natoms-1][iy]
z2 = self.snaps[i].atoms[natoms-1][iz]
if x1 >= -0.1 and x1 <= 1.1 and y1 >= -0.1 and y1 <= 1.1 and \
z1 >= -0.1 and z1 <= 1.1 and x2 >= -0.1 and x2 <= 1.1 and \
y2 >= -0.1 and y2 <= 1.1 and z2 >= -0.1 and z2 <= 1.1:
return 1
else: return 0
# --------------------------------------------------------------------
# map atom column names
def map(self,*pairs):
if len(pairs) % 2 != 0:
raise StandardError, "dump map() requires pairs of mappings"
for i in range(0,len(pairs),2):
j = i + 1
self.names[pairs[j]] = pairs[i]-1
# delete unselected snapshots
# --------------------------------------------------------------------
def delete(self):
ndel = i = 0
while i < self.nsnaps:
if not self.snaps[i].tselect:
del self.snaps[i]
self.nsnaps -= 1
ndel += 1
else: i += 1
print "%d snapshots deleted" % ndel
print "%d snapshots remaining" % self.nsnaps
# --------------------------------------------------------------------
# scale coords to 0-1 for all snapshots or just one
def scale(self,*list):
if len(list) == 0:
print "Scaling dump ..."
x = self.names["x"]
y = self.names["y"]
z = self.names["z"]
for snap in self.snaps: self.scale_one(snap,x,y,z)
else:
i = self.findtime(list[0])
x = self.names["x"]
y = self.names["y"]
z = self.names["z"]
self.scale_one(self.snaps[i],x,y,z)
# --------------------------------------------------------------------
def scale_one(self,snap,x,y,z):
    if snap.atoms is None: return
xprdinv = 1.0 / (snap.xhi - snap.xlo)
yprdinv = 1.0 / (snap.yhi - snap.ylo)
zprdinv = 1.0 / (snap.zhi - snap.zlo)
atoms = snap.atoms
atoms[:,x] = (atoms[:,x] - snap.xlo) * xprdinv
atoms[:,y] = (atoms[:,y] - snap.ylo) * yprdinv
atoms[:,z] = (atoms[:,z] - snap.zlo) * zprdinv
# --------------------------------------------------------------------
# unscale coords from 0-1 to box size for all snapshots or just one
def unscale(self,*list):
if len(list) == 0:
print "Unscaling dump ..."
x = self.names["x"]
y = self.names["y"]
z = self.names["z"]
for snap in self.snaps: self.unscale_one(snap,x,y,z)
else:
i = self.findtime(list[0])
x = self.names["x"]
y = self.names["y"]
z = self.names["z"]
self.unscale_one(self.snaps[i],x,y,z)
# --------------------------------------------------------------------
def unscale_one(self,snap,x,y,z):
    if snap.atoms is None: return
xprd = snap.xhi - snap.xlo
yprd = snap.yhi - snap.ylo
zprd = snap.zhi - snap.zlo
atoms = snap.atoms
atoms[:,x] = snap.xlo + atoms[:,x]*xprd
atoms[:,y] = snap.ylo + atoms[:,y]*yprd
atoms[:,z] = snap.zlo + atoms[:,z]*zprd
# --------------------------------------------------------------------
# wrap coords from outside box to inside
def wrap(self):
print "Wrapping dump ..."
x = self.names["x"]
y = self.names["y"]
z = self.names["z"]
ix = self.names["ix"]
iy = self.names["iy"]
iz = self.names["iz"]
for snap in self.snaps:
xprd = snap.xhi - snap.xlo
yprd = snap.yhi - snap.ylo
zprd = snap.zhi - snap.zlo
atoms = snap.atoms
atoms[:,x] -= atoms[:,ix]*xprd
atoms[:,y] -= atoms[:,iy]*yprd
atoms[:,z] -= atoms[:,iz]*zprd
# --------------------------------------------------------------------
# unwrap coords from inside box to outside
def unwrap(self):
print "Unwrapping dump ..."
x = self.names["x"]
y = self.names["y"]
z = self.names["z"]
ix = self.names["ix"]
iy = self.names["iy"]
iz = self.names["iz"]
for snap in self.snaps:
xprd = snap.xhi - snap.xlo
yprd = snap.yhi - snap.ylo
zprd = snap.zhi - snap.zlo
atoms = snap.atoms
atoms[:,x] += atoms[:,ix]*xprd
atoms[:,y] += atoms[:,iy]*yprd
atoms[:,z] += atoms[:,iz]*zprd
# --------------------------------------------------------------------
# wrap coords to same image as atom ID stored in "other" column
def owrap(self,other):
print "Wrapping to other ..."
id = self.names["id"]
x = self.names["x"]
y = self.names["y"]
z = self.names["z"]
ix = self.names["ix"]
iy = self.names["iy"]
iz = self.names["iz"]
iother = self.names[other]
for snap in self.snaps:
xprd = snap.xhi - snap.xlo
yprd = snap.yhi - snap.ylo
zprd = snap.zhi - snap.zlo
atoms = snap.atoms
ids = {}
for i in xrange(snap.natoms):
ids[atoms[i][id]] = i
for i in xrange(snap.natoms):
j = ids[atoms[i][iother]]
atoms[i][x] += (atoms[i][ix]-atoms[j][ix])*xprd
atoms[i][y] += (atoms[i][iy]-atoms[j][iy])*yprd
atoms[i][z] += (atoms[i][iz]-atoms[j][iz])*zprd
# --------------------------------------------------------------------
# convert column names assignment to a string, in column order
def names2str(self):
ncol = len(self.snaps[0].atoms[0])
pairs = self.names.items()
values = self.names.values()
str = ""
for i in xrange(ncol):
if i in values: str += pairs[values.index(i)][0] + ' '
return str
# --------------------------------------------------------------------
# sort atoms by atom ID in all selected timesteps by default
# if arg = string, sort all steps by that column
# if arg = numeric, sort atoms in single step
def sort(self,*list):
if len(list) == 0:
print "Sorting selected snapshots ..."
id = self.names["id"]
for snap in self.snaps:
if snap.tselect: self.sort_one(snap,id)
elif type(list[0]) is types.StringType:
print "Sorting selected snapshots by %s ..." % list[0]
id = self.names[list[0]]
for snap in self.snaps:
if snap.tselect: self.sort_one(snap,id)
else:
i = self.findtime(list[0])
id = self.names["id"]
self.sort_one(self.snaps[i],id)
# --------------------------------------------------------------------
# sort a single snapshot by ID column
def sort_one(self,snap,id):
atoms = snap.atoms
ids = atoms[:,id]
ordering = np.argsort(ids)
for i in xrange(len(atoms[0])):
atoms[:,i] = np.take(atoms[:,i],ordering)
# --------------------------------------------------------------------
# write a single dump file from current selection
def write(self,file,header=1,append=0):
if len(self.snaps): namestr = self.names2str()
if not append: f = open(file,"w")
else: f = open(file,"a")
for snap in self.snaps:
if not snap.tselect: continue
print snap.time,
sys.stdout.flush()
if header:
print >>f,"ITEM: TIMESTEP"
print >>f,snap.time
print >>f,"ITEM: NUMBER OF ATOMS"
print >>f,snap.nselect
print >>f,"ITEM: BOX BOUNDS"
print >>f,snap.xlo,snap.xhi
print >>f,snap.ylo,snap.yhi
print >>f,snap.zlo,snap.zhi
print >>f,"ITEM: ATOMS",namestr
atoms = snap.atoms
nvalues = len(atoms[0])
for i in xrange(snap.natoms):
if not snap.aselect[i]: continue
line = ""
for j in xrange(nvalues):
if (j < 2):
line += str(int(atoms[i][j])) + " "
else:
line += str(atoms[i][j]) + " "
print >>f,line
f.close()
print "\n%d snapshots" % self.nselect
# --------------------------------------------------------------------
# write one dump file per snapshot from current selection
def scatter(self,root):
if len(self.snaps): namestr = self.names2str()
for snap in self.snaps:
if not snap.tselect: continue
print snap.time,
sys.stdout.flush()
file = root + "." + str(snap.time)
f = open(file,"w")
print >>f,"ITEM: TIMESTEP"
print >>f,snap.time
print >>f,"ITEM: NUMBER OF ATOMS"
print >>f,snap.nselect
print >>f,"ITEM: BOX BOUNDS"
print >>f,snap.xlo,snap.xhi
print >>f,snap.ylo,snap.yhi
print >>f,snap.zlo,snap.zhi
print >>f,"ITEM: ATOMS",namestr
atoms = snap.atoms
nvalues = len(atoms[0])
for i in xrange(snap.natoms):
if not snap.aselect[i]: continue
line = ""
for j in xrange(nvalues):
if (j < 2):
line += str(int(atoms[i][j])) + " "
else:
line += str(atoms[i][j]) + " "
print >>f,line
f.close()
print "\n%d snapshots" % self.nselect
# --------------------------------------------------------------------
# find min/max across all selected snapshots/atoms for a particular column
def minmax(self,colname):
icol = self.names[colname]
min = 1.0e20
max = -min
for snap in self.snaps:
if not snap.tselect: continue
atoms = snap.atoms
for i in xrange(snap.natoms):
if not snap.aselect[i]: continue
if atoms[i][icol] < min: min = atoms[i][icol]
if atoms[i][icol] > max: max = atoms[i][icol]
return (min,max)
# --------------------------------------------------------------------
# set a column value via an equation for all selected snapshots
def set(self,eq):
print "Setting ..."
pattern = "\$\w*"
list = re.findall(pattern,eq)
lhs = list[0][1:]
if not self.names.has_key(lhs):
self.newcolumn(lhs)
for item in list:
name = item[1:]
column = self.names[name]
insert = "snap.atoms[i][%d]" % (column)
eq = eq.replace(item,insert)
ceq = compile(eq,'','single')
for snap in self.snaps:
if not snap.tselect: continue
for i in xrange(snap.natoms):
if snap.aselect[i]: exec ceq
# --------------------------------------------------------------------
# set a column value via an input vec for all selected snapshots/atoms
def setv(self,colname,vec):
print "Setting ..."
if not self.names.has_key(colname):
self.newcolumn(colname)
icol = self.names[colname]
for snap in self.snaps:
if not snap.tselect: continue
if snap.nselect != len(vec):
raise StandardError,"vec length does not match # of selected atoms"
atoms = snap.atoms
m = 0
for i in xrange(snap.natoms):
if snap.aselect[i]:
atoms[i][icol] = vec[m]
m += 1
# --------------------------------------------------------------------
# clone value in col across selected timesteps for atoms with same ID
def clone(self,nstep,col):
istep = self.findtime(nstep)
icol = self.names[col]
id = self.names["id"]
ids = {}
for i in xrange(self.snaps[istep].natoms):
ids[self.snaps[istep].atoms[i][id]] = i
for snap in self.snaps:
if not snap.tselect: continue
atoms = snap.atoms
for i in xrange(snap.natoms):
if not snap.aselect[i]: continue
j = ids[atoms[i][id]]
atoms[i][icol] = self.snaps[istep].atoms[j][icol]
# --------------------------------------------------------------------
# values in old column are spread as ints from 1-N and assigned to new column
def spread(self,old,n,new):
iold = self.names[old]
if not self.names.has_key(new): self.newcolumn(new)
inew = self.names[new]
min,max = self.minmax(old)
print "min/max = ",min,max
gap = max - min
invdelta = n/gap
for snap in self.snaps:
if not snap.tselect: continue
atoms = snap.atoms
for i in xrange(snap.natoms):
if not snap.aselect[i]: continue
ivalue = int((atoms[i][iold] - min) * invdelta) + 1
if ivalue > n: ivalue = n
if ivalue < 1: ivalue = 1
atoms[i][inew] = ivalue
# --------------------------------------------------------------------
# return vector of selected snapshot time stamps
def time(self):
vec = self.nselect * [0]
i = 0
for snap in self.snaps:
if not snap.tselect: continue
vec[i] = snap.time
i += 1
return vec
# --------------------------------------------------------------------
# extract vector(s) of values for atom ID n at each selected timestep
def atom(self,n,*list):
if len(list) == 0:
raise StandardError, "no columns specified"
columns = []
values = []
for name in list:
columns.append(self.names[name])
values.append(self.nselect * [0])
ncol = len(columns)
id = self.names["id"]
m = 0
for snap in self.snaps:
if not snap.tselect: continue
atoms = snap.atoms
for i in xrange(snap.natoms):
if atoms[i][id] == n: break
if atoms[i][id] != n:
raise StandardError, "could not find atom ID in snapshot"
for j in xrange(ncol):
values[j][m] = atoms[i][columns[j]]
m += 1
if len(list) == 1: return values[0]
else: return values
# --------------------------------------------------------------------
# extract vector(s) of values for selected atoms at chosen timestep
def vecs(self,n,*list):
snap = self.snaps[self.findtime(n)]
if len(list) == 0:
raise StandardError, "no columns specified"
columns = []
values = []
for name in list:
columns.append(self.names[name])
values.append(snap.nselect * [0])
ncol = len(columns)
m = 0
for i in xrange(snap.natoms):
if not snap.aselect[i]: continue
for j in xrange(ncol):
values[j][m] = snap.atoms[i][columns[j]]
m += 1
if len(list) == 1: return values[0]
else: return values
# --------------------------------------------------------------------
# add a new column to every snapshot and set value to 0
# set the name of the column to str
def newcolumn(self,str):
ncol = len(self.snaps[0].atoms[0])
self.map(ncol+1,str)
for snap in self.snaps:
atoms = snap.atoms
if oldnumeric: newatoms = np.zeros((snap.natoms,ncol+1),np.Float)
else: newatoms = np.zeros((snap.natoms,ncol+1),np.float)
newatoms[:,0:ncol] = snap.atoms
snap.atoms = newatoms
# --------------------------------------------------------------------
# sort snapshots on time stamp
def compare_time(self,a,b):
if a.time < b.time:
return -1
elif a.time > b.time:
return 1
else:
return 0
# --------------------------------------------------------------------
# delete successive snapshots with duplicate time stamp
def cull(self):
i = 1
while i < len(self.snaps):
if self.snaps[i].time == self.snaps[i-1].time:
del self.snaps[i]
else:
i += 1
# --------------------------------------------------------------------
# iterate over selected snapshots
def iterator(self,flag):
start = 0
if flag: start = self.iterate + 1
for i in xrange(start,self.nsnaps):
if self.snaps[i].tselect:
self.iterate = i
return i,self.snaps[i].time,1
return 0,0,-1
# --------------------------------------------------------------------
# return list of atoms to viz for snapshot isnap
# augment with bonds, tris, lines if extra() was invoked
def viz(self,isnap):
snap = self.snaps[isnap]
time = snap.time
box = [snap.xlo,snap.ylo,snap.zlo,snap.xhi,snap.yhi,snap.zhi]
id = self.names["id"]
type = self.names[self.atype]
x = self.names["x"]
y = self.names["y"]
z = self.names["z"]
# create atom list needed by viz from id,type,x,y,z
# need Numeric/Numpy mode here
atoms = []
for i in xrange(snap.natoms):
if not snap.aselect[i]: continue
atom = snap.atoms[i]
atoms.append([atom[id],atom[type],atom[x],atom[y],atom[z]])
# create list of current bond coords from static bondlist
# alist = dictionary of atom IDs for atoms list
# lookup bond atom IDs in alist and grab their coords
# try is used since some atoms may be unselected
# any bond with unselected atom is not returned to viz caller
# need Numeric/Numpy mode here
bonds = []
if self.bondflag:
alist = {}
for i in xrange(len(atoms)): alist[int(atoms[i][0])] = i
for bond in self.bondlist:
try:
i = alist[bond[2]]
j = alist[bond[3]]
atom1 = atoms[i]
atom2 = atoms[j]
bonds.append([bond[0],bond[1],atom1[2],atom1[3],atom1[4],
atom2[2],atom2[3],atom2[4],atom1[1],atom2[1]])
except: continue
tris = []
if self.triflag:
if self.triflag == 1: tris = self.trilist
elif self.triflag == 2:
timetmp,boxtmp,atomstmp,bondstmp, \
tris,linestmp = self.triobj.viz(time,1)
lines = []
if self.lineflag: lines = self.linelist
return time,box,atoms,bonds,tris,lines
# --------------------------------------------------------------------
def findtime(self,n):
for i in xrange(self.nsnaps):
if self.snaps[i].time == n: return i
raise StandardError, "no step %d exists" % n
# --------------------------------------------------------------------
# return maximum box size across all selected snapshots
def maxbox(self):
xlo = ylo = zlo = None
xhi = yhi = zhi = None
for snap in self.snaps:
if not snap.tselect: continue
if xlo == None or snap.xlo < xlo: xlo = snap.xlo
if xhi == None or snap.xhi > xhi: xhi = snap.xhi
if ylo == None or snap.ylo < ylo: ylo = snap.ylo
if yhi == None or snap.yhi > yhi: yhi = snap.yhi
if zlo == None or snap.zlo < zlo: zlo = snap.zlo
if zhi == None or snap.zhi > zhi: zhi = snap.zhi
return [xlo,ylo,zlo,xhi,yhi,zhi]
# --------------------------------------------------------------------
# return maximum atom type across all selected snapshots and atoms
def maxtype(self):
icol = self.names["type"]
max = 0
for snap in self.snaps:
if not snap.tselect: continue
atoms = snap.atoms
for i in xrange(snap.natoms):
if not snap.aselect[i]: continue
if atoms[i][icol] > max: max = atoms[i][icol]
return int(max)
# --------------------------------------------------------------------
# grab bonds/tris/lines from another object
def extra(self,arg):
# read bonds from bond dump file
if type(arg) is types.StringType:
try:
f = open(arg,'r')
item = f.readline()
time = int(f.readline())
item = f.readline()
nbonds = int(f.readline())
item = f.readline()
if not re.search("BONDS",item):
raise StandardError, "could not read bonds from dump file"
words = f.readline().split()
ncol = len(words)
for i in xrange(1,nbonds):
words += f.readline().split()
f.close()
# convert values to int and absolute value since can be negative types
if oldnumeric: bondlist = np.zeros((nbonds,4),np.Int)
else: bondlist = np.zeros((nbonds,4),np.int)
ints = [abs(int(value)) for value in words]
start = 0
stop = 4
for i in xrange(nbonds):
bondlist[i] = ints[start:stop]
start += ncol
stop += ncol
        if len(bondlist):
self.bondflag = 1
self.bondlist = bondlist
except:
raise StandardError,"could not read from bond dump file"
# request bonds from data object
elif type(arg) is types.InstanceType and ".data" in str(arg.__class__):
try:
bondlist = []
bondlines = arg.sections["Bonds"]
for line in bondlines:
words = line.split()
bondlist.append([int(words[0]),int(words[1]),
int(words[2]),int(words[3])])
if bondlist:
self.bondflag = 1
self.bondlist = bondlist
except:
raise StandardError,"could not extract bonds from data object"
# request tris/lines from cdata object
elif type(arg) is types.InstanceType and ".cdata" in str(arg.__class__):
try:
tmp,tmp,tmp,tmp,tris,lines = arg.viz(0)
if tris:
self.triflag = 1
self.trilist = tris
if lines:
self.lineflag = 1
self.linelist = lines
except:
raise StandardError,"could not extract tris/lines from cdata object"
# request tris from mdump object
elif type(arg) is types.InstanceType and ".mdump" in str(arg.__class__):
try:
self.triflag = 2
self.triobj = arg
except:
raise StandardError,"could not extract tris from mdump object"
else:
raise StandardError,"unrecognized argument to dump.extra()"
# --------------------------------------------------------------------
def compare_atom(self,a,b):
if a[0] < b[0]:
return -1
elif a[0] > b[0]:
return 1
else:
return 0
# --------------------------------------------------------------------
# one snapshot
class Snap:
pass
# --------------------------------------------------------------------
# time selection class
class tselect:
def __init__(self,data):
self.data = data
# --------------------------------------------------------------------
def all(self):
data = self.data
for snap in data.snaps:
snap.tselect = 1
data.nselect = len(data.snaps)
data.aselect.all()
print "%d snapshots selected out of %d" % (data.nselect,data.nsnaps)
# --------------------------------------------------------------------
def one(self,n):
data = self.data
for snap in data.snaps:
snap.tselect = 0
i = data.findtime(n)
data.snaps[i].tselect = 1
data.nselect = 1
data.aselect.all()
print "%d snapshots selected out of %d" % (data.nselect,data.nsnaps)
# --------------------------------------------------------------------
def none(self):
data = self.data
for snap in data.snaps:
snap.tselect = 0
data.nselect = 0
print "%d snapshots selected out of %d" % (data.nselect,data.nsnaps)
# --------------------------------------------------------------------
def skip(self,n):
data = self.data
count = n-1
for snap in data.snaps:
if not snap.tselect: continue
count += 1
if count == n:
count = 0
continue
snap.tselect = 0
data.nselect -= 1
data.aselect.all()
print "%d snapshots selected out of %d" % (data.nselect,data.nsnaps)
# --------------------------------------------------------------------
def test(self,teststr):
data = self.data
snaps = data.snaps
cmd = "flag = " + teststr.replace("$t","snaps[i].time")
ccmd = compile(cmd,'','single')
for i in xrange(data.nsnaps):
if not snaps[i].tselect: continue
exec ccmd
if not flag:
snaps[i].tselect = 0
data.nselect -= 1
data.aselect.all()
print "%d snapshots selected out of %d" % (data.nselect,data.nsnaps)
# --------------------------------------------------------------------
# atom selection class
class aselect:
def __init__(self,data):
self.data = data
# --------------------------------------------------------------------
def all(self,*args):
data = self.data
if len(args) == 0: # all selected timesteps
for snap in data.snaps:
if not snap.tselect: continue
for i in xrange(snap.natoms): snap.aselect[i] = 1
snap.nselect = snap.natoms
else: # one timestep
n = data.findtime(args[0])
snap = data.snaps[n]
for i in xrange(snap.natoms): snap.aselect[i] = 1
snap.nselect = snap.natoms
# --------------------------------------------------------------------
def test(self,teststr,*args):
data = self.data
# replace all $var with snap.atoms references and compile test string
pattern = "\$\w*"
list = re.findall(pattern,teststr)
for item in list:
name = item[1:]
column = data.names[name]
insert = "snap.atoms[i][%d]" % column
teststr = teststr.replace(item,insert)
cmd = "flag = " + teststr
ccmd = compile(cmd,'','single')
if len(args) == 0: # all selected timesteps
for snap in data.snaps:
if not snap.tselect: continue
for i in xrange(snap.natoms):
if not snap.aselect[i]: continue
exec ccmd
if not flag:
snap.aselect[i] = 0
snap.nselect -= 1
for i in xrange(data.nsnaps):
if data.snaps[i].tselect:
print "%d atoms of %d selected in first step %d" % \
(data.snaps[i].nselect,data.snaps[i].natoms,data.snaps[i].time)
break
for i in xrange(data.nsnaps-1,-1,-1):
if data.snaps[i].tselect:
print "%d atoms of %d selected in last step %d" % \
(data.snaps[i].nselect,data.snaps[i].natoms,data.snaps[i].time)
break
else: # one timestep
n = data.findtime(args[0])
snap = data.snaps[n]
for i in xrange(snap.natoms):
if not snap.aselect[i]: continue
exec ccmd
if not flag:
snap.aselect[i] = 0
snap.nselect -= 1
| gpl-2.0 | 8,468,125,030,432,317,000 | 5,099,665,983,795,876,000 | 31.79935 | 79 | 0.54837 | false |
algorhythms/LeetCode | 652 Find Duplicate Subtrees.py | 1 | 2915 | #!/usr/bin/python3
"""
Given a binary tree, return all duplicate subtrees. For each kind of duplicate
subtrees, you only need to return the root node of any one of them.
Two trees are duplicate if they have the same structure with same node values.
Example 1:
1
/ \
2 3
/ / \
4 2 4
/
4
The following are two duplicate subtrees:
2
/
4
and
4
Therefore, you need to return above trees' root in the form of a list.
"""
# Definition for a binary tree node.
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
from typing import List
from collections import defaultdict
class MerkleHash:
def __init__(self):
self.start_key = 0
self.merkle_hash = defaultdict(self._auto_incr) # subtree -> id
def _auto_incr(self):
self.start_key += 1
return self.start_key
def __call__(self, val):
return self.merkle_hash[val]
class Solution:
def __init__(self):
self.counter = defaultdict(int)
self.merkle_hash = MerkleHash()
def findDuplicateSubtrees(self, root: TreeNode) -> List[TreeNode]:
"""
        Merkle hash based on the current val and the left and right subtree merkles.
        Assign each subtree an identity/hash.
        A chain of hashes can uniquely identify a subtree.
"""
ret = []
self.walk(root, ret)
return ret
def walk(self, cur, ret) -> int:
"""
return merkle hash id
"""
if not cur:
return self.merkle_hash(None)
subtree_value = (cur.val, self.walk(cur.left, ret), self.walk(cur.right, ret))
merkle_hash = self.merkle_hash(subtree_value)
if self.counter[merkle_hash] == 1:
ret.append(cur)
self.counter[merkle_hash] += 1
return merkle_hash
class Solution2:
def findDuplicateSubtrees(self, root: TreeNode) -> List[TreeNode]:
"""
Only need to return the root
"""
ret = []
self.walk(root, defaultdict(int), ret)
return ret
def walk(self, cur, counter, ret) -> str:
"""
        Serialize the subtrees and check for existence.
        The serialization needs to be a unique representation of the subtree;
        we cannot put cur.val in the middle, as then we could not
        differentiate between
0
/
0
0
\
0
        because you would not know which one is the root.
        Complexity: O(N) nodes * O(N) per-node string concatenation.
"""
if not cur:
return "None"
cur_key = ",".join([
self.walk(cur.left, counter, ret),
self.walk(cur.right, counter, ret),
str(cur.val),
])
if counter[cur_key] == 1:
ret.append(cur)
counter[cur_key] += 1
return cur_key
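# A minimal usage sketch (not part of the original solutions): build the tree
# from Example 1 in the module docstring and print the duplicate subtree roots
# found by the Merkle-hash based Solution.
if __name__ == "__main__":
    root = TreeNode(1)
    root.left = TreeNode(2)
    root.right = TreeNode(3)
    root.left.left = TreeNode(4)
    root.right.left = TreeNode(2)
    root.right.left.left = TreeNode(4)
    root.right.right = TreeNode(4)
    duplicates = Solution().findDuplicateSubtrees(root)
    print([node.val for node in duplicates])  # expected [4, 2], following the post-order walk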
| mit | 4,692,219,857,267,920,000 | 5,988,872,458,317,699,000 | 22.134921 | 86 | 0.560549 | false |
Comcast/neutron | neutron/debug/debug_agent.py | 4 | 7959 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import shlex
import socket
import netaddr
from oslo.config import cfg
from neutron.agent.common import config
from neutron.agent.linux.dhcp import DictModel
from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)
DEVICE_OWNER_NETWORK_PROBE = 'network:probe'
DEVICE_OWNER_COMPUTE_PROBE = 'compute:probe'
class NeutronDebugAgent():
OPTS = [
# Needed for drivers
cfg.BoolOpt('use_namespaces', default=True,
help=_("Use Linux network namespaces")),
cfg.StrOpt('interface_driver',
help=_("The driver used to manage the virtual "
"interface.")),
cfg.StrOpt('external_network_bridge', default='br-ex',
help=_("Name of bridge used for external network "
"traffic.")),
]
def __init__(self, conf, client, driver):
self.conf = conf
self.root_helper = config.get_root_helper(conf)
self.client = client
self.driver = driver
def _get_namespace(self, port):
return "qprobe-%s" % port.id
def create_probe(self, network_id, device_owner='network'):
network = self._get_network(network_id)
bridge = None
if network.external:
bridge = self.conf.external_network_bridge
port = self._create_port(network, device_owner)
interface_name = self.driver.get_device_name(port)
namespace = None
if self.conf.use_namespaces:
namespace = self._get_namespace(port)
if ip_lib.device_exists(interface_name, self.root_helper, namespace):
LOG.debug(_('Reusing existing device: %s.'), interface_name)
else:
self.driver.plug(network.id,
port.id,
interface_name,
port.mac_address,
bridge=bridge,
namespace=namespace)
ip_cidrs = []
for fixed_ip in port.fixed_ips:
subnet = fixed_ip.subnet
net = netaddr.IPNetwork(subnet.cidr)
ip_cidr = '%s/%s' % (fixed_ip.ip_address, net.prefixlen)
ip_cidrs.append(ip_cidr)
self.driver.init_l3(interface_name, ip_cidrs, namespace=namespace)
return port
def _get_subnet(self, subnet_id):
subnet_dict = self.client.show_subnet(subnet_id)['subnet']
return DictModel(subnet_dict)
def _get_network(self, network_id):
network_dict = self.client.show_network(network_id)['network']
network = DictModel(network_dict)
network.external = network_dict.get('router:external')
obj_subnet = [self._get_subnet(s_id) for s_id in network.subnets]
network.subnets = obj_subnet
return network
def clear_probe(self):
ports = self.client.list_ports(
device_id=socket.gethostname(),
device_owner=[DEVICE_OWNER_NETWORK_PROBE,
DEVICE_OWNER_COMPUTE_PROBE])
info = ports['ports']
for port in info:
self.delete_probe(port['id'])
def delete_probe(self, port_id):
port = DictModel(self.client.show_port(port_id)['port'])
network = self._get_network(port.network_id)
bridge = None
if network.external:
bridge = self.conf.external_network_bridge
ip = ip_lib.IPWrapper(self.root_helper)
namespace = self._get_namespace(port)
if self.conf.use_namespaces and ip.netns.exists(namespace):
self.driver.unplug(self.driver.get_device_name(port),
bridge=bridge,
namespace=namespace)
try:
ip.netns.delete(namespace)
except Exception:
LOG.warn(_('Failed to delete namespace %s'), namespace)
else:
self.driver.unplug(self.driver.get_device_name(port),
bridge=bridge)
self.client.delete_port(port.id)
def list_probes(self):
ports = self.client.list_ports(
device_owner=[DEVICE_OWNER_NETWORK_PROBE,
DEVICE_OWNER_COMPUTE_PROBE])
info = ports['ports']
for port in info:
port['device_name'] = self.driver.get_device_name(DictModel(port))
return info
def exec_command(self, port_id, command=None):
port = DictModel(self.client.show_port(port_id)['port'])
ip = ip_lib.IPWrapper(self.root_helper)
namespace = self._get_namespace(port)
if self.conf.use_namespaces:
if not command:
return "sudo ip netns exec %s" % self._get_namespace(port)
namespace = ip.ensure_namespace(namespace)
return namespace.netns.execute(shlex.split(command))
else:
return utils.execute(shlex.split(command))
def ensure_probe(self, network_id):
ports = self.client.list_ports(network_id=network_id,
device_id=socket.gethostname(),
device_owner=DEVICE_OWNER_NETWORK_PROBE)
info = ports.get('ports', [])
if info:
return DictModel(info[0])
else:
return self.create_probe(network_id)
def ping_all(self, network_id=None, timeout=1):
if network_id:
ports = self.client.list_ports(network_id=network_id)['ports']
else:
ports = self.client.list_ports()['ports']
result = ""
for port in ports:
probe = self.ensure_probe(port['network_id'])
if port['device_owner'] == DEVICE_OWNER_NETWORK_PROBE:
continue
for fixed_ip in port['fixed_ips']:
address = fixed_ip['ip_address']
subnet = self._get_subnet(fixed_ip['subnet_id'])
if subnet.ip_version == 4:
ping_command = 'ping'
else:
ping_command = 'ping6'
result += self.exec_command(probe.id,
'%s -c 1 -w %s %s' % (ping_command,
timeout,
address))
return result
def _create_port(self, network, device_owner):
host = self.conf.host
body = {'port': {'admin_state_up': True,
'network_id': network.id,
'device_id': '%s' % socket.gethostname(),
'device_owner': '%s:probe' % device_owner,
'tenant_id': network.tenant_id,
'binding:host_id': host,
'fixed_ips': [dict(subnet_id=s.id)
for s in network.subnets]}}
port_dict = self.client.create_port(body)['port']
port = DictModel(port_dict)
port.network = network
for fixed_ip in port.fixed_ips:
fixed_ip.subnet = self._get_subnet(fixed_ip.subnet_id)
return port
| apache-2.0 | -7,183,515,487,974,749,000 | 5,460,835,055,347,786,000 | 38.40099 | 79 | 0.557105 | false |
civisanalytics/civis-python | civis/cli/_cli_commands.py | 1 | 11952 | #!/usr/bin/env python3
"""
Additional commands to add to the CLI beyond the OpenAPI spec.
"""
from __future__ import print_function
import functools
import operator
import os
import sys
import time
import click
import requests
import webbrowser
import civis
from civis.io import file_to_civis, civis_to_file
# From http://patorjk.com/software/taag/#p=display&f=3D%20Diagonal&t=CIVIS
_CIVIS_ASCII_ART = r"""
,----.. ,---, ,---, .--.--.
/ / \ ,`--.' | ,---.,`--.' | / / '.
| : :| : : /__./|| : :| : /`. /
. | ;. /: | ' ,---.; ; |: | '; | |--`
. ; /--` | : |/___/ \ | || : || : ;_
; | ; ' ' ;\ ; \ ' |' ' ; \ \ `.
| : | | | | \ \ \: || | | `----. \
. | '___ ' : ; ; \ ' .' : ; __ \ \ |
' ; : .'|| | ' \ \ '| | ' / /`--' /
' | '/ :' : | \ ` ;' : |'--'. /
| : / ; |.' : \ |; |.' `--'---'
\ \ .' '---' '---" '---'
`---`
"""
_FOLLOW_LOG_NOTE = '''
Outputs job run logs in the format: "datetime message\\n" where
datetime is in ISO8601 format, like "2020-02-14T20:28:18.722Z".
If the job is still running, this command will continue outputting logs
until the run is done and then exit. If the run is already finished, it
will output all the logs from that run and then exit.
NOTE: This command could miss some log entries from a currently-running
job. It does not re-fetch logs that might have been saved out of order, to
preserve the chronological order of the logs and to avoid duplication.
'''
_FOLLOW_POLL_INTERVAL_SEC = 3
@click.command('upload')
@click.argument('path')
@click.option('--name', type=str, default=None,
help="A name for the Civis File (defaults to the base file name")
@click.option('--expires-at', type=str, default=None,
help="The date and time the file will expire "
"(ISO-8601 format, e.g., \"2017-01-15\" or "
"\"2017-01-15T15:25:10Z\"). "
"Set \"never\" for the file to not expire."
"The default is the default in Civis (30 days).")
def files_upload_cmd(path, name, expires_at):
"""Upload a local file to Civis and get back the File ID."""
if name is None:
name = os.path.basename(path)
if expires_at is None:
# Use the default in Civis platform (30 days).
expires_kwarg = {}
elif expires_at.lower() == "never":
expires_kwarg = {"expires_at": None}
else:
expires_kwarg = {"expires_at": expires_at}
with open(path, 'rb') as f:
file_id = file_to_civis(f, name=name, **expires_kwarg)
print(file_id)
@click.command('download')
@click.argument('file_id', type=int)
@click.argument('path')
def files_download_cmd(file_id, path):
"""Download a Civis File to a specified local path."""
with open(path, 'wb') as f:
civis_to_file(file_id, f)
@click.command('sql')
@click.option('--dbname', '-d', type=str, required=True,
help='Execute the query on this Civis Platform database')
@click.option('--command', '-c', type=str, default=None,
help='Execute a single input command string')
@click.option('--filename', '-f', type=click.Path(exists=True),
help='Execute a query read from the given file')
@click.option('--output', '-o', type=click.Path(),
help='Download query results to this file')
@click.option('--quiet', '-q', is_flag=True, help='Suppress screen output')
@click.option('-n', type=int, default=100,
help="Display up to this many rows of the result. Max 100.")
def sql_cmd(dbname, command, filename, output, quiet, n):
"""\b Execute a SQL query in Civis Platform
If neither a command nor an input file is specified, read
the SQL command from stdin.
If writing to an output file, use a Civis SQL script and write the
entire query output to the specified file.
If not writing to an output file, use a Civis Query, and return a
preview of the results, up to a maximum of 100 rows.
"""
if filename:
with open(filename, 'rt') as f:
sql = f.read()
elif not command:
# Read the SQL query from user input. This also allows use of a heredoc
lines = []
while True:
try:
_i = input()
except (KeyboardInterrupt, EOFError):
# The end of a heredoc produces an EOFError.
break
if not _i:
break
else:
lines.append(_i)
sql = '\n'.join(lines)
else:
sql = command
if not sql:
# If the user didn't enter a query, exit.
if not quiet:
print('ERROR: Did not receive a SQL query.', file=sys.stderr)
return
if not quiet:
print('\nExecuting query...', file=sys.stderr)
if output:
fut = civis.io.civis_to_csv(output, sql, database=dbname)
fut.result() # Block for completion and raise exceptions if any
if not quiet:
print("Downloaded the result of the query to %s." % output,
file=sys.stderr)
else:
fut = civis.io.query_civis(sql, database=dbname,
preview_rows=n, polling_interval=3)
cols = fut.result()['result_columns']
rows = fut.result()['result_rows']
if not quiet:
print('...Query complete.\n', file=sys.stderr)
print(_str_table_result(cols, rows))
def _str_table_result(cols, rows):
"""Turn a Civis Query result into a readable table."""
str_rows = [['' if _v is None else _v for _v in row] for row in rows]
# Determine the maximum width of each column.
# First find the width of each element in each row, then find the max
# width in each position.
max_len = functools.reduce(
lambda x, y: [max(z) for z in zip(x, y)],
[[len(_v) for _v in _r] for _r in [cols] + str_rows])
header_str = " | ".join("{0:<{width}}".format(_v, width=_l)
for _l, _v in zip(max_len, cols))
tb_strs = [header_str, len(header_str) * '-']
for row in str_rows:
tb_strs.append(" | ".join("{0:>{width}}".format(_v, width=_l)
for _l, _v in zip(max_len, row)))
return '\n'.join(tb_strs)
@click.command(
'follow-log',
help='Output live log from the most recent job run.' + _FOLLOW_LOG_NOTE)
@click.argument('id', type=int)
def jobs_follow_log(id):
client = civis.APIClient()
runs = client.jobs.list_runs(id, limit=1, order='id', order_dir='desc')
if not runs:
raise click.ClickException('No runs found for that job ID.')
run_id = runs[0].id
print('Run ID: ' + str(run_id))
_jobs_follow_run_log(id, run_id)
@click.command(
'follow-run-log',
help='Output live run log.' + _FOLLOW_LOG_NOTE)
@click.argument('id', type=int)
@click.argument('run_id', type=int)
def jobs_follow_run_log(id, run_id):
_jobs_follow_run_log(id, run_id)
def _jobs_follow_run_log(id, run_id):
client = civis.APIClient(return_type='raw')
local_max_log_id = 0
continue_polling = True
while continue_polling:
# This call gets all available log messages since last_id up to
# the page size, ordered by log ID. We leave it to Platform to decide
# the best page size.
response = client.jobs.list_runs_logs(id, run_id,
last_id=local_max_log_id)
if 'civis-max-id' in response.headers:
remote_max_log_id = int(response.headers['civis-max-id'])
else:
# Platform hasn't seen any logs at all yet
remote_max_log_id = None
logs = response.json()
if logs:
local_max_log_id = max(log['id'] for log in logs)
logs.sort(key=operator.itemgetter('createdAt', 'id'))
for log in logs:
print(' '.join((log['createdAt'], log['message'].rstrip())))
# if output is a pipe, write the buffered output immediately:
sys.stdout.flush()
log_finished = response.headers['civis-cache-control'] != 'no-store'
if remote_max_log_id is None:
remote_has_more_logs_to_get_now = False
elif local_max_log_id == remote_max_log_id:
remote_has_more_logs_to_get_now = False
if log_finished:
continue_polling = False
else:
remote_has_more_logs_to_get_now = True
if continue_polling and not remote_has_more_logs_to_get_now:
time.sleep(_FOLLOW_POLL_INTERVAL_SEC)
@click.command('download')
@click.argument('notebook_id', type=int)
@click.argument('path')
def notebooks_download_cmd(notebook_id, path):
"""Download a notebook to a specified local path."""
client = civis.APIClient()
info = client.notebooks.get(notebook_id)
response = requests.get(info['notebook_url'], stream=True)
response.raise_for_status()
chunk_size = 32 * 1024
chunked = response.iter_content(chunk_size)
with open(path, 'wb') as f:
for lines in chunked:
f.write(lines)
@click.command('new')
@click.argument('language', type=click.Choice(['python3', 'r']),
default='python3')
@click.option('--mem', type=int, default=None,
help='Memory allocated for this notebook in MiB.')
@click.option('--cpu', type=int, default=None,
help='CPU available for this notebook in 1/1000 of a core.')
def notebooks_new_cmd(language='python3', mem=None, cpu=None):
"""Create a new notebook and open it in the browser."""
client = civis.APIClient()
kwargs = {'memory': mem, 'cpu': cpu}
kwargs = {k: v for k, v in kwargs.items() if v is not None}
new_nb = client.notebooks.post(language=language, **kwargs)
print("Created new {language} notebook with ID {id} in Civis Platform"
" (https://platform.civisanalytics.com/#/notebooks/{id})."
.format(language=language, id=new_nb.id))
_notebooks_up(new_nb.id)
_notebooks_open(new_nb.id)
@click.command('up')
@click.argument('notebook_id', type=int)
@click.option('--mem', type=int, default=None,
help='Memory allocated for this notebook in MiB.')
@click.option('--cpu', type=int, default=None,
help='CPU available for this notebook in 1/1000 of a core.')
def notebooks_up(notebook_id, mem=None, cpu=None):
"""Start an existing notebook and open it in the browser."""
client = civis.APIClient()
kwargs = {'memory': mem, 'cpu': cpu}
kwargs = {k: v for k, v in kwargs.items() if v is not None}
client.notebooks.patch(notebook_id, **kwargs)
_notebooks_up(notebook_id)
_notebooks_open(notebook_id)
def _notebooks_up(notebook_id):
client = civis.APIClient()
return client.notebooks.post_deployments(notebook_id)
@click.command('down')
@click.argument('notebook_id', type=int)
def notebooks_down(notebook_id):
"""Shut down a running notebook."""
client = civis.APIClient()
nb = client.notebooks.get(notebook_id)
state = nb['most_recent_deployment']['state']
if state not in ['running', 'pending']:
print('Notebook is in state "{}" and can\'t be stopped.'.format(state))
deployment_id = nb['most_recent_deployment']['deploymentId']
client.notebooks.delete_deployments(notebook_id, deployment_id)
@click.command('open')
@click.argument('notebook_id', type=int)
def notebooks_open(notebook_id):
"""Open an existing notebook in the browser."""
_notebooks_open(notebook_id)
def _notebooks_open(notebook_id):
url = 'https://platform.civisanalytics.com/#/notebooks/{}?fullscreen=true'
url = url.format(notebook_id)
webbrowser.open(url, new=2, autoraise=True)
@click.command('civis', help="Print Civis")
def civis_ascii_art():
print(_CIVIS_ASCII_ART)
| bsd-3-clause | -5,900,637,042,495,341,000 | -3,255,044,375,499,494,400 | 36.118012 | 79 | 0.589525 | false |
dhimmel/networkx | networkx/generators/tests/test_line.py | 57 | 2357 | import networkx as nx
from nose.tools import *
import networkx.generators.line as line
def test_node_func():
# graph
G = nx.Graph()
G.add_edge(1,2)
nf = line._node_func(G)
assert_equal(nf(1,2), (1,2))
assert_equal(nf(2,1), (1,2))
# multigraph
G = nx.MultiGraph()
G.add_edge(1,2)
G.add_edge(1,2)
nf = line._node_func(G)
assert_equal(nf(1,2,0), (1,2,0))
assert_equal(nf(2,1,0), (1,2,0))
def test_edge_func():
# graph
G = nx.Graph()
G.add_edge(1,2)
G.add_edge(2,3)
ef = line._edge_func(G)
expected = [(1,2),(2,3)]
result = sorted(ef())
assert_equal(expected, result)
# digraph
G = nx.MultiDiGraph()
G.add_edge(1,2)
G.add_edge(2,3)
G.add_edge(2,3)
ef = line._edge_func(G)
expected = [(1,2,0),(2,3,0),(2,3,1)]
result = sorted(ef())
assert_equal(expected, result)
def test_sorted_edge():
assert_equal( (1,2), line._sorted_edge(1,2) )
assert_equal( (1,2), line._sorted_edge(2,1) )
class TestGeneratorLine():
def test_star(self):
G = nx.star_graph(5)
L = nx.line_graph(G)
assert_true(nx.is_isomorphic(L, nx.complete_graph(5)))
def test_path(self):
G = nx.path_graph(5)
L = nx.line_graph(G)
assert_true(nx.is_isomorphic(L, nx.path_graph(4)))
def test_cycle(self):
G = nx.cycle_graph(5)
L = nx.line_graph(G)
assert_true(nx.is_isomorphic(L, G))
def test_digraph1(self):
G = nx.DiGraph()
G.add_edges_from([(0,1),(0,2),(0,3)])
L = nx.line_graph(G)
# no edge graph, but with nodes
assert_equal(L.adj, {(0,1):{}, (0,2):{}, (0,3):{}})
def test_digraph2(self):
G = nx.DiGraph()
G.add_edges_from([(0,1),(1,2),(2,3)])
L = nx.line_graph(G)
assert_equal(sorted(L.edges()), [((0, 1), (1, 2)), ((1, 2), (2, 3))])
def test_create1(self):
G = nx.DiGraph()
G.add_edges_from([(0,1),(1,2),(2,3)])
L = nx.line_graph(G, create_using=nx.Graph())
assert_equal(sorted(L.edges()), [((0, 1), (1, 2)), ((1, 2), (2, 3))])
def test_create2(self):
G = nx.Graph()
G.add_edges_from([(0,1),(1,2),(2,3)])
L = nx.line_graph(G, create_using=nx.DiGraph())
assert_equal(sorted(L.edges()), [((0, 1), (1, 2)), ((1, 2), (2, 3))])
| bsd-3-clause | -1,495,181,399,335,706,400 | -5,070,593,229,373,443,000 | 26.729412 | 77 | 0.522698 | false |
cchurch/ansible | test/units/modules/storage/netapp/test_netapp_e_auditlog.py | 68 | 10758 | # (c) 2018, NetApp Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from ansible.modules.storage.netapp.netapp_e_auditlog import AuditLog
from units.modules.utils import AnsibleFailJson, ModuleTestCase, set_module_args
__metaclass__ = type
from units.compat import mock
class AuditLogTests(ModuleTestCase):
REQUIRED_PARAMS = {'api_username': 'rw',
'api_password': 'password',
'api_url': 'http://localhost',
'ssid': '1'}
REQ_FUNC = 'ansible.modules.storage.netapp.netapp_e_auditlog.request'
MAX_RECORDS_MAXIMUM = 50000
MAX_RECORDS_MINIMUM = 100
def _set_args(self, **kwargs):
module_args = self.REQUIRED_PARAMS.copy()
if kwargs is not None:
module_args.update(kwargs)
set_module_args(module_args)
def test_max_records_argument_pass(self):
"""Verify AuditLog arument's max_records and threshold upper and lower boundaries."""
initial = {"max_records": 1000,
"log_level": "writeOnly",
"full_policy": "overWrite",
"threshold": 90}
max_records_set = (self.MAX_RECORDS_MINIMUM, 25000, self.MAX_RECORDS_MAXIMUM)
for max_records in max_records_set:
initial["max_records"] = max_records
self._set_args(**initial)
with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": False})):
audit_log = AuditLog()
self.assertTrue(audit_log.max_records == max_records)
def test_max_records_argument_fail(self):
"""Verify AuditLog arument's max_records and threshold upper and lower boundaries."""
initial = {"max_records": 1000,
"log_level": "writeOnly",
"full_policy": "overWrite",
"threshold": 90}
max_records_set = (self.MAX_RECORDS_MINIMUM - 1, self.MAX_RECORDS_MAXIMUM + 1)
for max_records in max_records_set:
with self.assertRaisesRegexp(AnsibleFailJson, r"Audit-log max_records count must be between 100 and 50000"):
initial["max_records"] = max_records
self._set_args(**initial)
AuditLog()
def test_threshold_argument_pass(self):
"""Verify AuditLog arument's max_records and threshold upper and lower boundaries."""
initial = {"max_records": 1000,
"log_level": "writeOnly",
"full_policy": "overWrite",
"threshold": 90}
threshold_set = (60, 75, 90)
for threshold in threshold_set:
initial["threshold"] = threshold
self._set_args(**initial)
with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": False})):
audit_log = AuditLog()
self.assertTrue(audit_log.threshold == threshold)
def test_threshold_argument_fail(self):
"""Verify AuditLog arument's max_records and threshold upper and lower boundaries."""
initial = {"max_records": 1000,
"log_level": "writeOnly",
"full_policy": "overWrite",
"threshold": 90}
threshold_set = (59, 91)
for threshold in threshold_set:
with self.assertRaisesRegexp(AnsibleFailJson, r"Audit-log percent threshold must be between 60 and 90"):
initial["threshold"] = threshold
self._set_args(**initial)
with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": False})):
AuditLog()
def test_is_proxy_pass(self):
"""Verify that True is returned when proxy is used to communicate with storage."""
initial = {"max_records": 1000,
"log_level": "writeOnly",
"full_policy": "overWrite",
"threshold": 90,
"api_url": "https://10.1.1.10/devmgr/v2"}
self._set_args(**initial)
with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": True})):
audit_log = AuditLog()
with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": True})):
self.assertTrue(audit_log.is_proxy())
def test_is_proxy_fail(self):
"""Verify that AnsibleJsonFail exception is thrown when exception occurs."""
initial = {"max_records": 1000,
"log_level": "writeOnly",
"full_policy": "overWrite",
"threshold": 90}
self._set_args(**initial)
with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": True})):
audit_log = AuditLog()
with self.assertRaisesRegexp(AnsibleFailJson, r"Failed to retrieve the webservices about information"):
with mock.patch(self.REQ_FUNC, return_value=Exception()):
audit_log.is_proxy()
def test_get_configuration_pass(self):
"""Validate get configuration does not throw exception when normal request is returned."""
initial = {"max_records": 1000,
"log_level": "writeOnly",
"full_policy": "overWrite",
"threshold": 90}
expected = {"auditLogMaxRecords": 1000,
"auditLogLevel": "writeOnly",
"auditLogFullPolicy": "overWrite",
"auditLogWarningThresholdPct": 90}
self._set_args(**initial)
with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": True})):
audit_log = AuditLog()
with mock.patch(self.REQ_FUNC, return_value=(200, expected)):
body = audit_log.get_configuration()
self.assertTrue(body == expected)
def test_get_configuration_fail(self):
"""Verify AnsibleJsonFail exception is thrown."""
initial = {"max_records": 1000,
"log_level": "writeOnly",
"full_policy": "overWrite",
"threshold": 90}
self._set_args(**initial)
with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": True})):
audit_log = AuditLog()
with self.assertRaisesRegexp(AnsibleFailJson, r"Failed to retrieve the audit-log configuration!"):
with mock.patch(self.REQ_FUNC, return_value=Exception()):
audit_log.get_configuration()
def test_build_configuration_pass(self):
"""Validate configuration changes will force an update."""
response = {"auditLogMaxRecords": 1000,
"auditLogLevel": "writeOnly",
"auditLogFullPolicy": "overWrite",
"auditLogWarningThresholdPct": 90}
initial = {"max_records": 1000,
"log_level": "writeOnly",
"full_policy": "overWrite",
"threshold": 90}
changes = [{"max_records": 50000},
{"log_level": "all"},
{"full_policy": "preventSystemAccess"},
{"threshold": 75}]
for change in changes:
initial_with_changes = initial.copy()
initial_with_changes.update(change)
self._set_args(**initial_with_changes)
with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": True})):
audit_log = AuditLog()
with mock.patch(self.REQ_FUNC, return_value=(200, response)):
update = audit_log.build_configuration()
self.assertTrue(update)
def test_delete_log_messages_fail(self):
"""Verify AnsibleJsonFail exception is thrown."""
initial = {"max_records": 1000,
"log_level": "writeOnly",
"full_policy": "overWrite",
"threshold": 90}
self._set_args(**initial)
with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": True})):
audit_log = AuditLog()
with self.assertRaisesRegexp(AnsibleFailJson, r"Failed to delete audit-log messages!"):
with mock.patch(self.REQ_FUNC, return_value=Exception()):
audit_log.delete_log_messages()
def test_update_configuration_delete_pass(self):
"""Verify 422 and force successfully returns True."""
body = {"auditLogMaxRecords": 1000,
"auditLogLevel": "writeOnly",
"auditLogFullPolicy": "overWrite",
"auditLogWarningThresholdPct": 90}
initial = {"max_records": 2000,
"log_level": "writeOnly",
"full_policy": "overWrite",
"threshold": 90,
"force": True}
self._set_args(**initial)
with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": True})):
audit_log = AuditLog()
with mock.patch(self.REQ_FUNC, side_effect=[(200, body),
(422, {u"invalidFieldsIfKnown": None,
u"errorMessage": u"Configuration change...",
u"localizedMessage": u"Configuration change...",
u"retcode": u"auditLogImmediateFullCondition",
u"codeType": u"devicemgrerror"}),
(200, None),
(200, None)]):
self.assertTrue(audit_log.update_configuration())
def test_update_configuration_delete_skip_fail(self):
"""Verify 422 and no force results in AnsibleJsonFail exception."""
body = {"auditLogMaxRecords": 1000,
"auditLogLevel": "writeOnly",
"auditLogFullPolicy": "overWrite",
"auditLogWarningThresholdPct": 90}
initial = {"max_records": 2000,
"log_level": "writeOnly",
"full_policy": "overWrite",
"threshold": 90,
"force": False}
self._set_args(**initial)
with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": True})):
audit_log = AuditLog()
with self.assertRaisesRegexp(AnsibleFailJson, r"Failed to update audit-log configuration!"):
with mock.patch(self.REQ_FUNC, side_effect=[(200, body), Exception(422, {"errorMessage": "error"}),
(200, None), (200, None)]):
audit_log.update_configuration()
| gpl-3.0 | -7,832,401,116,650,445,000 | 1,410,726,204,710,225,700 | 44.974359 | 120 | 0.545362 | false |
erjohnso/ansible | lib/ansible/modules/system/aix_inittab.py | 26 | 7531 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Joris Weijters <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
author: "Joris Weijters (@molekuul)"
module: aix_inittab
short_description: Manages the inittab on AIX.
description:
- Manages the inittab on AIX.
version_added: "2.3"
options:
name:
description:
- Name of the inittab entry.
required: True
aliases: ['service']
runlevel:
description:
- Runlevel of the entry.
required: True
action:
description:
      - The action that init should take for this entry.
required: True
choices: [
'respawn',
'wait',
'once',
'boot',
'bootwait',
'powerfail',
'powerwait',
'off',
'hold',
'ondemand',
'initdefault',
'sysinit'
]
command:
description:
      - The command that has to run.
required: True
insertafter:
description:
      - The inittab entry after which the new entry should be inserted.
state:
description:
      - Whether the entry should be present or absent in the inittab file.
choices: [ "present", "absent" ]
default: present
notes:
  - The changes are persistent across reboots. You need root rights to read or adjust the inittab with the lsitab, chitab,
mkitab or rmitab commands.
- tested on AIX 7.1.
requirements: [ 'itertools']
'''
EXAMPLES = '''
# Add service startmyservice to the inittab, directly after service existingservice.
- name: Add startmyservice to inittab
aix_inittab:
name: startmyservice
runlevel: 4
action: once
command: "echo hello"
insertafter: existingservice
state: present
become: yes
# Change inittab entry startmyservice to runlevel "2" and processaction "wait".
- name: Change startmyservice to inittab
aix_inittab:
name: startmyservice
runlevel: 2
action: wait
command: "echo hello"
state: present
become: yes
# Remove inittab entry startmyservice.
- name: remove startmyservice from inittab
aix_inittab:
name: startmyservice
runlevel: 2
action: wait
command: "echo hello"
state: absent
become: yes
'''
RETURN = '''
name:
description: name of the adjusted inittab entry
returned: always
type: string
sample: startmyservice
msg:
description: action done with the inittab entry
returned: changed
type: string
sample: changed inittab entry startmyservice
changed:
description: whether the inittab changed or not
returned: always
type: boolean
sample: true
'''
# Import necessary libraries
import itertools
from ansible.module_utils.basic import AnsibleModule
# end import modules
# start defining the functions
def check_current_entry(module):
    # Check whether the entry exists. If it does not, return a dict with
    # 'exist': False; if it does, return 'exist': True plus the parsed entry fields.
existsdict = {'exist': False}
lsitab = module.get_bin_path('lsitab')
(rc, out, err) = module.run_command([lsitab, module.params['name']])
if rc == 0:
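        # lsitab prints the whole entry as "name:runlevel:action:command", e.g.
        # "startmyservice:4:once:echo hello", which is split on ":" below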
keys = ('name', 'runlevel', 'action', 'command')
values = out.split(":")
        # strip non-printable characters such as the trailing "\n"
        values = [value.strip() for value in values]
        # zip() works on both Python 2 and 3
        existsdict = dict(zip(keys, values))
existsdict.update({'exist': True})
return existsdict
def main():
# initialize
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True, type='str', aliases=['service']),
runlevel=dict(required=True, type='str'),
action=dict(choices=[
'respawn',
'wait',
'once',
'boot',
'bootwait',
'powerfail',
'powerwait',
'off',
'hold',
'ondemand',
'initdefault',
'sysinit'
], type='str'),
command=dict(required=True, type='str'),
insertafter=dict(type='str'),
state=dict(choices=[
'present',
'absent',
], required=True, type='str'),
),
supports_check_mode=True,
)
result = {
'name': module.params['name'],
'changed': False,
'msg': ""
}
# Find commandline strings
mkitab = module.get_bin_path('mkitab')
rmitab = module.get_bin_path('rmitab')
chitab = module.get_bin_path('chitab')
rc = 0
# check if the new entry exists
current_entry = check_current_entry(module)
    # if state is 'present', install or change the entry
if module.params['state'] == 'present':
# create new entry string
new_entry = module.params['name'] + ":" + module.params['runlevel'] + \
":" + module.params['action'] + ":" + module.params['command']
        # If the entry does not exist, or any of its fields differ, it will be
        # created or changed below.
if (not current_entry['exist']) or (
module.params['runlevel'] != current_entry['runlevel'] or
module.params['action'] != current_entry['action'] or
module.params['command'] != current_entry['command']):
# If the entry does exist then change the entry
if current_entry['exist']:
if not module.check_mode:
(rc, out, err) = module.run_command([chitab, new_entry])
if rc != 0:
module.fail_json(
msg="could not change inittab", rc=rc, err=err)
result['msg'] = "changed inittab entry" + " " + current_entry['name']
result['changed'] = True
            # If the entry does not exist, create it
elif not current_entry['exist']:
if module.params['insertafter']:
if not module.check_mode:
(rc, out, err) = module.run_command(
[mkitab, '-i', module.params['insertafter'], new_entry])
else:
if not module.check_mode:
(rc, out, err) = module.run_command(
[mkitab, new_entry])
if rc != 0:
module.fail_json(msg="could not adjust inittab", rc=rc, err=err)
result['msg'] = "add inittab entry" + " " + module.params['name']
result['changed'] = True
elif module.params['state'] == 'absent':
# If the action is remove and the entry exists then remove the entry
if current_entry['exist']:
if not module.check_mode:
(rc, out, err) = module.run_command(
[rmitab, module.params['name']])
if rc != 0:
module.fail_json(
msg="could not remove entry grom inittab)", rc=rc, err=err)
result['msg'] = "removed inittab entry" + " " + current_entry['name']
result['changed'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 | 8,144,436,396,497,166,000 | -6,665,716,770,784,180,000 | 29.489879 | 122 | 0.56128 | false |
catapult-project/catapult | telemetry/telemetry/internal/backends/chrome/android_browser_finder.py | 3 | 20134 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Finds android browsers that can be started and controlled by telemetry."""
from __future__ import absolute_import
import contextlib
import logging
import os
import platform
import posixpath
import shutil
import subprocess
from devil import base_error
from devil.android import apk_helper
from devil.android import flag_changer
from devil.android.sdk import version_codes
from py_utils import dependency_util
from py_utils import file_util
from py_utils import tempfile_ext
from telemetry import compat_mode_options
from telemetry import decorators
from telemetry.core import exceptions
from telemetry.core import platform as telemetry_platform
from telemetry.core import util
from telemetry.internal.backends import android_browser_backend_settings
from telemetry.internal.backends.chrome import android_browser_backend
from telemetry.internal.backends.chrome import chrome_startup_args
from telemetry.internal.browser import browser
from telemetry.internal.browser import possible_browser
from telemetry.internal.platform import android_device
from telemetry.internal.util import binary_manager
from telemetry.internal.util import format_for_logging
from telemetry.internal.util import local_first_binary_manager
ANDROID_BACKEND_SETTINGS = (
android_browser_backend_settings.ANDROID_BACKEND_SETTINGS)
@contextlib.contextmanager
def _ProfileWithExtraFiles(profile_dir, profile_files_to_copy):
"""Yields a temporary directory populated with input files.
Args:
profile_dir: A directory whose contents will be copied to the output
directory.
profile_files_to_copy: A list of (source, dest) tuples to be copied to
the output directory.
Yields: A path to a temporary directory, named "_default_profile". This
directory will be cleaned up when this context exits.
"""
with tempfile_ext.NamedTemporaryDirectory() as tempdir:
# TODO(csharrison): "_default_profile" was chosen because this directory
# will be pushed to the device's sdcard. We don't want to choose a
# random name due to the extra failure mode of filling up the sdcard
# in the case of unclean test teardown. We should consider changing
# PushProfile to avoid writing to this intermediate location.
host_profile = os.path.join(tempdir, '_default_profile')
if profile_dir:
shutil.copytree(profile_dir, host_profile)
else:
os.mkdir(host_profile)
# Add files from |profile_files_to_copy| into the host profile
# directory. Don't copy files if they already exist.
for source, dest in profile_files_to_copy:
host_path = os.path.join(host_profile, dest)
if not os.path.exists(host_path):
file_util.CopyFileWithIntermediateDirectories(source, host_path)
yield host_profile
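# A minimal usage sketch of _ProfileWithExtraFiles (illustrative names only):
#
#   with _ProfileWithExtraFiles('/path/to/seed_profile',
#                               [('/tmp/Cookies', 'Default/Cookies')]) as profile:
#     platform_backend.PushProfile(package, profile)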
class PossibleAndroidBrowser(possible_browser.PossibleBrowser):
"""A launchable android browser instance."""
def __init__(self, browser_type, finder_options, android_platform,
backend_settings, local_apk=None, target_os='android'):
super(PossibleAndroidBrowser, self).__init__(
browser_type, target_os, backend_settings.supports_tab_control)
assert browser_type in FindAllBrowserTypes(), (
'Please add %s to android_browser_finder.FindAllBrowserTypes' %
browser_type)
self._platform = android_platform
self._platform_backend = (
android_platform._platform_backend) # pylint: disable=protected-access
self._backend_settings = backend_settings
self._local_apk = local_apk
self._flag_changer = None
self._modules_to_install = None
self._compile_apk = finder_options.compile_apk
if self._local_apk is None and finder_options.chrome_root is not None:
self._local_apk = self._backend_settings.FindLocalApk(
self._platform_backend.device, finder_options.chrome_root)
# At this point the local_apk, if any, must exist.
assert self._local_apk is None or os.path.exists(self._local_apk)
self._build_dir = util.GetBuildDirFromHostApkPath(self._local_apk)
if finder_options.modules_to_install:
self._modules_to_install = set(['base'] +
finder_options.modules_to_install)
self._support_apk_list = []
if (self._backend_settings.requires_embedder or
self._backend_settings.has_additional_apk):
if finder_options.webview_embedder_apk:
self._support_apk_list = finder_options.webview_embedder_apk
else:
self._support_apk_list = self._backend_settings.FindSupportApks(
self._local_apk, finder_options.chrome_root)
elif finder_options.webview_embedder_apk:
logging.warning(
'No embedder needed for %s, ignoring --webview-embedder-apk option',
self._backend_settings.browser_type)
# At this point the apks in _support_apk_list, if any, must exist.
for apk in self._support_apk_list:
assert os.path.exists(apk)
def __repr__(self):
return 'PossibleAndroidBrowser(browser_type=%s)' % self.browser_type
@property
def settings(self):
"""Get the backend_settings for this possible browser."""
return self._backend_settings
@property
def browser_directory(self):
# On Android L+ the directory where base APK resides is also used for
# keeping extracted native libraries and .odex. Here is an example layout:
# /data/app/$package.apps.chrome-1/
# base.apk
# lib/arm/libchrome.so
# oat/arm/base.odex
# Declaring this toplevel directory as 'browser_directory' allows the cold
# startup benchmarks to flush OS pagecache for the native library, .odex and
# the APK.
apks = self._platform_backend.device.GetApplicationPaths(
self._backend_settings.package)
# A package can map to multiple APKs if the package overrides the app on
# the system image. Such overrides should not happen on perf bots. The
# package can also map to multiple apks if splits are used. In all cases, we
# want the directory that contains base.apk.
for apk in apks:
if apk.endswith('/base.apk'):
return apk[:-9]
return None
@property
def profile_directory(self):
return self._platform_backend.GetProfileDir(self._backend_settings.package)
@property
def last_modification_time(self):
if self._local_apk:
return os.path.getmtime(self._local_apk)
return -1
def _GetPathsForOsPageCacheFlushing(self):
return [self.profile_directory, self.browser_directory]
def _InitPlatformIfNeeded(self):
pass
def _SetupProfile(self):
if self._browser_options.dont_override_profile:
return
# Just remove the existing profile if we don't have any files to copy over.
# This is because PushProfile does not support pushing completely empty
# directories.
profile_files_to_copy = self._browser_options.profile_files_to_copy
if not self._browser_options.profile_dir and not profile_files_to_copy:
self._platform_backend.RemoveProfile(
self._backend_settings.package,
self._backend_settings.profile_ignore_list)
return
with _ProfileWithExtraFiles(self._browser_options.profile_dir,
profile_files_to_copy) as profile_dir:
self._platform_backend.PushProfile(self._backend_settings.package,
profile_dir)
def SetUpEnvironment(self, browser_options):
super(PossibleAndroidBrowser, self).SetUpEnvironment(browser_options)
self._platform_backend.DismissCrashDialogIfNeeded()
device = self._platform_backend.device
startup_args = self.GetBrowserStartupArgs(self._browser_options)
device.adb.Logcat(clear=True)
# use legacy commandline path if in compatibility mode
self._flag_changer = flag_changer.FlagChanger(
device, self._backend_settings.command_line_name, use_legacy_path=
compat_mode_options.LEGACY_COMMAND_LINE_PATH in
browser_options.compatibility_mode)
self._flag_changer.ReplaceFlags(startup_args, log_flags=False)
formatted_args = format_for_logging.ShellFormat(
startup_args, trim=browser_options.trim_logs)
logging.info('Flags set on device were %s', formatted_args)
# Stop any existing browser found already running on the device. This is
# done *after* setting the command line flags, in case some other Android
# process manages to trigger Chrome's startup before we do.
self._platform_backend.StopApplication(self._backend_settings.package)
self._SetupProfile()
# Remove any old crash dumps
self._platform_backend.device.RemovePath(
self._platform_backend.GetDumpLocation(self._backend_settings.package),
recursive=True, force=True)
def _TearDownEnvironment(self):
self._RestoreCommandLineFlags()
def _RestoreCommandLineFlags(self):
if self._flag_changer is not None:
try:
self._flag_changer.Restore()
finally:
self._flag_changer = None
def Create(self):
"""Launch the browser on the device and return a Browser object."""
return self._GetBrowserInstance(existing=False)
def FindExistingBrowser(self):
"""Find a browser running on the device and bind a Browser object to it.
The returned Browser object will only be bound to a running browser
instance whose package name matches the one specified by the backend
settings of this possible browser.
A BrowserGoneException is raised if the browser cannot be found.
"""
return self._GetBrowserInstance(existing=True)
def _GetBrowserInstance(self, existing):
# Init the LocalFirstBinaryManager if this is the first time we're creating
# a browser. Note that we use the host's OS and architecture since the
# retrieved dependencies are used on the host, not the device.
if local_first_binary_manager.LocalFirstBinaryManager.NeedsInit():
local_first_binary_manager.LocalFirstBinaryManager.Init(
self._build_dir, self._local_apk, platform.system().lower(),
platform.machine())
browser_backend = android_browser_backend.AndroidBrowserBackend(
self._platform_backend, self._browser_options,
self.browser_directory, self.profile_directory,
self._backend_settings,
build_dir=self._build_dir)
try:
return browser.Browser(
browser_backend, self._platform_backend, startup_args=(),
find_existing=existing)
except Exception:
browser_backend.Close()
raise
def GetBrowserStartupArgs(self, browser_options):
startup_args = chrome_startup_args.GetFromBrowserOptions(browser_options)
# use the flag `--ignore-certificate-errors` if in compatibility mode
supports_spki_list = (
self._backend_settings.supports_spki_list and
compat_mode_options.IGNORE_CERTIFICATE_ERROR
not in browser_options.compatibility_mode)
startup_args.extend(chrome_startup_args.GetReplayArgs(
self._platform_backend.network_controller_backend,
supports_spki_list=supports_spki_list))
startup_args.append('--enable-remote-debugging')
startup_args.append('--disable-fre')
startup_args.append('--disable-external-intent-requests')
# Need to specify the user profile directory for
# --ignore-certificate-errors-spki-list to work.
startup_args.append('--user-data-dir=' + self.profile_directory)
# Needed so that non-browser-process crashes avoid automatic dump upload
# and subsequent deletion. The extra "Crashpad" is necessary because
# crashpad_stackwalker.py is hard-coded to look for a "Crashpad" directory
# in the dump directory that it is provided.
startup_args.append('--breakpad-dump-location=' + posixpath.join(
self._platform_backend.GetDumpLocation(self._backend_settings.package),
'Crashpad'))
return startup_args
def SupportsOptions(self, browser_options):
if len(browser_options.extensions_to_load) != 0:
return False
return True
def IsAvailable(self):
"""Returns True if the browser is or can be installed on the platform."""
has_local_apks = self._local_apk and (
not self._backend_settings.requires_embedder or self._support_apk_list)
return has_local_apks or self.platform.CanLaunchApplication(
self.settings.package)
@decorators.Cache
def UpdateExecutableIfNeeded(self):
# TODO(crbug.com/815133): This logic should belong to backend_settings.
for apk in self._support_apk_list:
      logging.warning('Installing %s on device if needed.', apk)
self.platform.InstallApplication(apk)
apk_name = self._backend_settings.GetApkName(
self._platform_backend.device)
is_webview_apk = apk_name is not None and ('SystemWebView' in apk_name or
'system_webview' in apk_name or
'TrichromeWebView' in apk_name or
'trichrome_webview' in apk_name)
# The WebView fallback logic prevents sideloaded WebView APKs from being
# installed and set as the WebView implementation correctly. Disable the
# fallback logic before installing the WebView APK to make sure the fallback
# logic doesn't interfere.
if is_webview_apk:
self._platform_backend.device.SetWebViewFallbackLogic(False)
if self._local_apk:
      logging.warning('Installing %s on device if needed.', self._local_apk)
self.platform.InstallApplication(
self._local_apk, modules=self._modules_to_install)
if self._compile_apk:
package_name = apk_helper.GetPackageName(self._local_apk)
        logging.warning('Compiling %s.', package_name)
self._platform_backend.device.RunShellCommand(
['cmd', 'package', 'compile', '-m', self._compile_apk, '-f',
package_name],
check_return=True)
sdk_version = self._platform_backend.device.build_version_sdk
# Bundles are in the ../bin directory, so it's safer to just check the
# correct name is part of the path.
is_monochrome = apk_name is not None and (apk_name == 'Monochrome.apk' or
'monochrome_bundle' in apk_name)
if ((is_webview_apk or
(is_monochrome and sdk_version < version_codes.Q)) and
sdk_version >= version_codes.NOUGAT):
package_name = apk_helper.GetPackageName(self._local_apk)
      logging.warning('Setting %s as WebView implementation.', package_name)
self._platform_backend.device.SetWebViewImplementation(package_name)
def GetTypExpectationsTags(self):
tags = super(PossibleAndroidBrowser, self).GetTypExpectationsTags()
if 'webview' in self.browser_type:
tags.append('android-webview')
else:
tags.append('android-not-webview')
if 'weblayer' in self.browser_type:
tags.append('android-weblayer')
return tags
def SelectDefaultBrowser(possible_browsers):
"""Return the newest possible browser."""
if not possible_browsers:
return None
return max(possible_browsers, key=lambda b: b.last_modification_time)
def CanFindAvailableBrowsers():
return android_device.CanDiscoverDevices()
def _CanPossiblyHandlePath(apk_path):
if not apk_path:
return False
try:
apk_helper.ToHelper(apk_path)
return True
except apk_helper.ApkHelperError:
return False
def FindAllBrowserTypes():
browser_types = [b.browser_type for b in ANDROID_BACKEND_SETTINGS]
return browser_types + ['exact', 'reference']
def _FetchReferenceApk(android_platform, is_bundle=False):
"""Fetch the apk for reference browser type from gcloud.
Local path to the apk will be returned upon success.
Otherwise, None will be returned.
"""
os_version = dependency_util.GetChromeApkOsVersion(
android_platform.GetOSVersionName())
if is_bundle:
os_version += '_bundle'
arch = android_platform.GetArchName()
try:
reference_build = binary_manager.FetchPath(
'chrome_stable', 'android', arch, os_version)
if reference_build and os.path.exists(reference_build):
return reference_build
except binary_manager.NoPathFoundError:
logging.warning('Cannot find path for reference apk for device %s',
android_platform.GetDeviceId())
except binary_manager.CloudStorageError:
logging.warning('Failed to download reference apk for device %s',
android_platform.GetDeviceId())
return None
def _GetReferenceAndroidBrowser(android_platform, finder_options):
reference_build = _FetchReferenceApk(android_platform)
if reference_build:
return PossibleAndroidBrowser(
'reference',
finder_options,
android_platform,
android_browser_backend_settings.ANDROID_CHROME,
reference_build)
def _FindAllPossibleBrowsers(finder_options, android_platform):
"""Testable version of FindAllAvailableBrowsers."""
if not android_platform:
return []
possible_browsers = []
for apk in finder_options.webview_embedder_apk:
if not os.path.exists(apk):
raise exceptions.PathMissingError(
'Unable to find apk specified by --webview-embedder-apk=%s' % apk)
# Add the exact APK if given.
if _CanPossiblyHandlePath(finder_options.browser_executable):
if not os.path.exists(finder_options.browser_executable):
raise exceptions.PathMissingError(
'Unable to find exact apk specified by --browser-executable=%s' %
finder_options.browser_executable)
package_name = apk_helper.GetPackageName(finder_options.browser_executable)
try:
backend_settings = next(
b for b in ANDROID_BACKEND_SETTINGS if b.package == package_name)
except StopIteration:
raise exceptions.UnknownPackageError(
'%s specified by --browser-executable has an unknown package: %s' %
(finder_options.browser_executable, package_name))
possible_browsers.append(PossibleAndroidBrowser(
'exact',
finder_options,
android_platform,
backend_settings,
finder_options.browser_executable))
if finder_options.IsBrowserTypeRelevant('reference'):
reference_browser = _GetReferenceAndroidBrowser(
android_platform, finder_options)
if reference_browser:
possible_browsers.append(reference_browser)
# Add any other known available browsers.
for settings in ANDROID_BACKEND_SETTINGS:
if finder_options.IsBrowserTypeRelevant(settings.browser_type):
local_apk = None
if finder_options.IsBrowserTypeReference():
local_apk = _FetchReferenceApk(
android_platform, finder_options.IsBrowserTypeBundle())
if settings.IsWebView():
p_browser = PossibleAndroidBrowser(
settings.browser_type, finder_options, android_platform, settings,
local_apk=local_apk, target_os='android_webview')
else:
p_browser = PossibleAndroidBrowser(
settings.browser_type, finder_options, android_platform, settings,
local_apk=local_apk)
if p_browser.IsAvailable():
possible_browsers.append(p_browser)
return possible_browsers
def FindAllAvailableBrowsers(finder_options, device):
"""Finds all the possible browsers on one device.
The device is either the only device on the host platform,
or |finder_options| specifies a particular device.
"""
if not isinstance(device, android_device.AndroidDevice):
return []
try:
android_platform = telemetry_platform.GetPlatformForDevice(
device, finder_options)
return _FindAllPossibleBrowsers(finder_options, android_platform)
except base_error.BaseError as e:
logging.error('Unable to find browsers on %s: %s', device.device_id, str(e))
ps_output = subprocess.check_output(['ps', '-ef'])
logging.error('Ongoing processes:\n%s', ps_output)
return []
| bsd-3-clause | -7,051,872,306,199,934,000 | -2,971,550,088,104,400,400 | 39.348697 | 80 | 0.703387 | false |
eg-zhang/h2o-2 | py/testdir_multi_jvm/test_GLM2_covtype_exec.py | 9 | 2344 | import unittest, time, sys, random
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_glm, h2o_import as h2i
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
h2o.init(3,java_heap_GB=4)
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_GLM2_covtype_exec(self):
csvFilename = 'covtype.data'
csvPathname = 'standard/' + csvFilename
hex_key = 'covtype.hex'
parseResult = h2i.import_parse(bucket='home-0xdiag-datasets', path=csvPathname, schema='put',
hex_key=hex_key, timeoutSecs=30)
inspect = h2o_cmd.runInspect(None, parseResult['destination_key'])
print "\n" + csvPathname, \
" numRows:", "{:,}".format(inspect['numRows']), \
" numCols:", "{:,}".format(inspect['numCols'])
print "WARNING: max_iter set to 8 for benchmark comparisons"
max_iter = 8
y = "54"
h2o_cmd.runExec(str='%s[,55] = %s[,55]==1' % (hex_key, hex_key))
# L2
kwargs = {
'response': y,
'family': 'binomial',
'n_folds': 0,
'max_iter': max_iter,
'beta_epsilon': 1e-3}
timeoutSecs = 120
start = time.time()
kwargs.update({'alpha': 0, 'lambda': 0})
glm = h2o_cmd.runGLM(parseResult=parseResult, timeoutSecs=timeoutSecs, **kwargs)
print "glm (L2) end on ", csvPathname, 'took', time.time() - start, 'seconds'
h2o_glm.simpleCheckGLM(self, glm, 'C14', **kwargs)
# Elastic
kwargs.update({'alpha': 0.5, 'lambda': 1e-4})
start = time.time()
glm = h2o_cmd.runGLM(parseResult=parseResult, timeoutSecs=timeoutSecs, **kwargs)
print "glm (Elastic) end on ", csvPathname, 'took', time.time() - start, 'seconds'
h2o_glm.simpleCheckGLM(self, glm, 'C14', **kwargs)
# L1
kwargs.update({'alpha': 1, 'lambda': 1e-4})
start = time.time()
glm = h2o_cmd.runGLM(parseResult=parseResult, timeoutSecs=timeoutSecs, **kwargs)
print "glm (L1) end on ", csvPathname, 'took', time.time() - start, 'seconds'
h2o_glm.simpleCheckGLM(self, glm, 'C14', **kwargs)
if __name__ == '__main__':
h2o.unit_main()
| apache-2.0 | -8,117,153,471,619,720,000 | 2,049,592,316,630,787,800 | 33.470588 | 101 | 0.567406 | false |
avneesh91/django | tests/auth_tests/test_forms.py | 15 | 34291 | import datetime
import re
from unittest import mock
from django import forms
from django.contrib.auth.forms import (
AdminPasswordChangeForm, AuthenticationForm, PasswordChangeForm,
PasswordResetForm, ReadOnlyPasswordHashField, ReadOnlyPasswordHashWidget,
SetPasswordForm, UserChangeForm, UserCreationForm,
)
from django.contrib.auth.models import User
from django.contrib.auth.signals import user_login_failed
from django.contrib.sites.models import Site
from django.core import mail
from django.core.mail import EmailMultiAlternatives
from django.forms.fields import CharField, Field, IntegerField
from django.test import SimpleTestCase, TestCase, override_settings
from django.utils import translation
from django.utils.text import capfirst
from django.utils.translation import gettext as _
from .models.custom_user import (
CustomUser, CustomUserWithoutIsActiveField, ExtensionUser,
)
from .models.with_custom_email_field import CustomEmailField
from .models.with_integer_username import IntegerUsernameUser
from .settings import AUTH_TEMPLATES
class TestDataMixin:
@classmethod
def setUpTestData(cls):
cls.u1 = User.objects.create_user(username='testclient', password='password', email='[email protected]')
cls.u2 = User.objects.create_user(username='inactive', password='password', is_active=False)
cls.u3 = User.objects.create_user(username='staff', password='password')
cls.u4 = User.objects.create(username='empty_password', password='')
cls.u5 = User.objects.create(username='unmanageable_password', password='$')
cls.u6 = User.objects.create(username='unknown_password', password='foo$bar')
class UserCreationFormTest(TestDataMixin, TestCase):
def test_user_already_exists(self):
data = {
'username': 'testclient',
'password1': 'test123',
'password2': 'test123',
}
form = UserCreationForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form["username"].errors,
[str(User._meta.get_field('username').error_messages['unique'])])
def test_invalid_data(self):
data = {
'username': 'jsmith!',
'password1': 'test123',
'password2': 'test123',
}
form = UserCreationForm(data)
self.assertFalse(form.is_valid())
validator = next(v for v in User._meta.get_field('username').validators if v.code == 'invalid')
self.assertEqual(form["username"].errors, [str(validator.message)])
def test_password_verification(self):
# The verification password is incorrect.
data = {
'username': 'jsmith',
'password1': 'test123',
'password2': 'test',
}
form = UserCreationForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form["password2"].errors,
[str(form.error_messages['password_mismatch'])])
def test_both_passwords(self):
# One (or both) passwords weren't given
data = {'username': 'jsmith'}
form = UserCreationForm(data)
required_error = [str(Field.default_error_messages['required'])]
self.assertFalse(form.is_valid())
self.assertEqual(form['password1'].errors, required_error)
self.assertEqual(form['password2'].errors, required_error)
data['password2'] = 'test123'
form = UserCreationForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form['password1'].errors, required_error)
self.assertEqual(form['password2'].errors, [])
@mock.patch('django.contrib.auth.password_validation.password_changed')
def test_success(self, password_changed):
# The success case.
data = {
'username': '[email protected]',
'password1': 'test123',
'password2': 'test123',
}
form = UserCreationForm(data)
self.assertTrue(form.is_valid())
form.save(commit=False)
self.assertEqual(password_changed.call_count, 0)
u = form.save()
self.assertEqual(password_changed.call_count, 1)
self.assertEqual(repr(u), '<User: [email protected]>')
def test_unicode_username(self):
data = {
'username': '宝',
'password1': 'test123',
'password2': 'test123',
}
form = UserCreationForm(data)
self.assertTrue(form.is_valid())
u = form.save()
self.assertEqual(u.username, '宝')
def test_normalize_username(self):
# The normalization happens in AbstractBaseUser.clean() and ModelForm
# validation calls Model.clean().
ohm_username = 'testΩ' # U+2126 OHM SIGN
data = {
'username': ohm_username,
'password1': 'pwd2',
'password2': 'pwd2',
}
form = UserCreationForm(data)
self.assertTrue(form.is_valid())
user = form.save()
self.assertNotEqual(user.username, ohm_username)
self.assertEqual(user.username, 'testΩ') # U+03A9 GREEK CAPITAL LETTER OMEGA
def test_duplicate_normalized_unicode(self):
"""
To prevent almost identical usernames, visually identical but differing
by their unicode code points only, Unicode NFKC normalization should
        make them appear equal to Django.
"""
omega_username = 'iamtheΩ' # U+03A9 GREEK CAPITAL LETTER OMEGA
ohm_username = 'iamtheΩ' # U+2126 OHM SIGN
self.assertNotEqual(omega_username, ohm_username)
User.objects.create_user(username=omega_username, password='pwd')
data = {
'username': ohm_username,
'password1': 'pwd2',
'password2': 'pwd2',
}
form = UserCreationForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors['username'], ["A user with that username already exists."]
)
@override_settings(AUTH_PASSWORD_VALIDATORS=[
{'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator'},
{'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', 'OPTIONS': {
'min_length': 12,
}},
])
def test_validates_password(self):
data = {
'username': 'testclient',
'password1': 'testclient',
'password2': 'testclient',
}
form = UserCreationForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(len(form['password2'].errors), 2)
self.assertIn('The password is too similar to the username.', form['password2'].errors)
self.assertIn(
'This password is too short. It must contain at least 12 characters.',
form['password2'].errors
)
def test_custom_form(self):
class CustomUserCreationForm(UserCreationForm):
class Meta(UserCreationForm.Meta):
model = ExtensionUser
fields = UserCreationForm.Meta.fields + ('date_of_birth',)
data = {
'username': 'testclient',
'password1': 'testclient',
'password2': 'testclient',
'date_of_birth': '1988-02-24',
}
form = CustomUserCreationForm(data)
self.assertTrue(form.is_valid())
def test_custom_form_with_different_username_field(self):
class CustomUserCreationForm(UserCreationForm):
class Meta(UserCreationForm.Meta):
model = CustomUser
fields = ('email', 'date_of_birth')
data = {
'email': '[email protected]',
'password1': 'testclient',
'password2': 'testclient',
'date_of_birth': '1988-02-24',
}
form = CustomUserCreationForm(data)
self.assertTrue(form.is_valid())
def test_custom_form_hidden_username_field(self):
class CustomUserCreationForm(UserCreationForm):
class Meta(UserCreationForm.Meta):
model = CustomUserWithoutIsActiveField
fields = ('email',) # without USERNAME_FIELD
data = {
'email': '[email protected]',
'password1': 'testclient',
'password2': 'testclient',
}
form = CustomUserCreationForm(data)
self.assertTrue(form.is_valid())
def test_password_whitespace_not_stripped(self):
data = {
'username': 'testuser',
'password1': ' testpassword ',
'password2': ' testpassword ',
}
form = UserCreationForm(data)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['password1'], data['password1'])
self.assertEqual(form.cleaned_data['password2'], data['password2'])
@override_settings(AUTH_PASSWORD_VALIDATORS=[
{'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator'},
])
def test_password_help_text(self):
form = UserCreationForm()
self.assertEqual(
form.fields['password1'].help_text,
            "<ul><li>Your password can't be too similar to your other personal information.</li></ul>"
)
# To verify that the login form rejects inactive users, use an authentication
# backend that allows them.
@override_settings(AUTHENTICATION_BACKENDS=['django.contrib.auth.backends.AllowAllUsersModelBackend'])
class AuthenticationFormTest(TestDataMixin, TestCase):
def test_invalid_username(self):
# The user submits an invalid username.
data = {
'username': 'jsmith_does_not_exist',
'password': 'test123',
}
form = AuthenticationForm(None, data)
self.assertFalse(form.is_valid())
self.assertEqual(
form.non_field_errors(), [
form.error_messages['invalid_login'] % {
'username': User._meta.get_field('username').verbose_name
}
]
)
def test_inactive_user(self):
# The user is inactive.
data = {
'username': 'inactive',
'password': 'password',
}
form = AuthenticationForm(None, data)
self.assertFalse(form.is_valid())
self.assertEqual(form.non_field_errors(), [str(form.error_messages['inactive'])])
def test_login_failed(self):
signal_calls = []
def signal_handler(**kwargs):
signal_calls.append(kwargs)
user_login_failed.connect(signal_handler)
fake_request = object()
try:
form = AuthenticationForm(fake_request, {
'username': 'testclient',
'password': 'incorrect',
})
self.assertFalse(form.is_valid())
self.assertIs(signal_calls[0]['request'], fake_request)
finally:
user_login_failed.disconnect(signal_handler)
def test_inactive_user_i18n(self):
with self.settings(USE_I18N=True), translation.override('pt-br', deactivate=True):
# The user is inactive.
data = {
'username': 'inactive',
'password': 'password',
}
form = AuthenticationForm(None, data)
self.assertFalse(form.is_valid())
self.assertEqual(form.non_field_errors(), [str(form.error_messages['inactive'])])
def test_custom_login_allowed_policy(self):
# The user is inactive, but our custom form policy allows them to log in.
data = {
'username': 'inactive',
'password': 'password',
}
class AuthenticationFormWithInactiveUsersOkay(AuthenticationForm):
def confirm_login_allowed(self, user):
pass
form = AuthenticationFormWithInactiveUsersOkay(None, data)
self.assertTrue(form.is_valid())
# If we want to disallow some logins according to custom logic,
# we should raise a django.forms.ValidationError in the form.
class PickyAuthenticationForm(AuthenticationForm):
def confirm_login_allowed(self, user):
if user.username == "inactive":
raise forms.ValidationError("This user is disallowed.")
raise forms.ValidationError("Sorry, nobody's allowed in.")
form = PickyAuthenticationForm(None, data)
self.assertFalse(form.is_valid())
self.assertEqual(form.non_field_errors(), ['This user is disallowed.'])
data = {
'username': 'testclient',
'password': 'password',
}
form = PickyAuthenticationForm(None, data)
self.assertFalse(form.is_valid())
self.assertEqual(form.non_field_errors(), ["Sorry, nobody's allowed in."])
def test_success(self):
# The success case
data = {
'username': 'testclient',
'password': 'password',
}
form = AuthenticationForm(None, data)
self.assertTrue(form.is_valid())
self.assertEqual(form.non_field_errors(), [])
def test_unicode_username(self):
User.objects.create_user(username='Σαρα', password='pwd')
data = {
'username': 'Σαρα',
'password': 'pwd',
}
form = AuthenticationForm(None, data)
self.assertTrue(form.is_valid())
self.assertEqual(form.non_field_errors(), [])
def test_username_field_label(self):
class CustomAuthenticationForm(AuthenticationForm):
username = CharField(label="Name", max_length=75)
form = CustomAuthenticationForm()
self.assertEqual(form['username'].label, "Name")
def test_username_field_label_not_set(self):
class CustomAuthenticationForm(AuthenticationForm):
username = CharField()
form = CustomAuthenticationForm()
username_field = User._meta.get_field(User.USERNAME_FIELD)
self.assertEqual(form.fields['username'].label, capfirst(username_field.verbose_name))
def test_username_field_label_empty_string(self):
class CustomAuthenticationForm(AuthenticationForm):
username = CharField(label='')
form = CustomAuthenticationForm()
self.assertEqual(form.fields['username'].label, "")
def test_password_whitespace_not_stripped(self):
data = {
'username': 'testuser',
'password': ' pass ',
}
form = AuthenticationForm(None, data)
        form.is_valid()  # Not necessary to have valid credentials for the test.
self.assertEqual(form.cleaned_data['password'], data['password'])
@override_settings(AUTH_USER_MODEL='auth_tests.IntegerUsernameUser')
def test_integer_username(self):
class CustomAuthenticationForm(AuthenticationForm):
username = IntegerField()
user = IntegerUsernameUser.objects.create_user(username=0, password='pwd')
data = {
'username': 0,
'password': 'pwd',
}
form = CustomAuthenticationForm(None, data)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['username'], data['username'])
self.assertEqual(form.cleaned_data['password'], data['password'])
self.assertEqual(form.errors, {})
self.assertEqual(form.user_cache, user)
class SetPasswordFormTest(TestDataMixin, TestCase):
def test_password_verification(self):
# The two new passwords do not match.
user = User.objects.get(username='testclient')
data = {
'new_password1': 'abc123',
'new_password2': 'abc',
}
form = SetPasswordForm(user, data)
self.assertFalse(form.is_valid())
self.assertEqual(
form["new_password2"].errors,
[str(form.error_messages['password_mismatch'])]
)
@mock.patch('django.contrib.auth.password_validation.password_changed')
def test_success(self, password_changed):
user = User.objects.get(username='testclient')
data = {
'new_password1': 'abc123',
'new_password2': 'abc123',
}
form = SetPasswordForm(user, data)
self.assertTrue(form.is_valid())
form.save(commit=False)
self.assertEqual(password_changed.call_count, 0)
form.save()
self.assertEqual(password_changed.call_count, 1)
@override_settings(AUTH_PASSWORD_VALIDATORS=[
{'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator'},
{'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', 'OPTIONS': {
'min_length': 12,
}},
])
def test_validates_password(self):
user = User.objects.get(username='testclient')
data = {
'new_password1': 'testclient',
'new_password2': 'testclient',
}
form = SetPasswordForm(user, data)
self.assertFalse(form.is_valid())
self.assertEqual(len(form["new_password2"].errors), 2)
self.assertIn('The password is too similar to the username.', form["new_password2"].errors)
self.assertIn(
'This password is too short. It must contain at least 12 characters.',
form["new_password2"].errors
)
def test_password_whitespace_not_stripped(self):
user = User.objects.get(username='testclient')
data = {
'new_password1': ' password ',
'new_password2': ' password ',
}
form = SetPasswordForm(user, data)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['new_password1'], data['new_password1'])
self.assertEqual(form.cleaned_data['new_password2'], data['new_password2'])
@override_settings(AUTH_PASSWORD_VALIDATORS=[
{'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator'},
{'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', 'OPTIONS': {
'min_length': 12,
}},
])
def test_help_text_translation(self):
french_help_texts = [
'Votre mot de passe ne peut pas trop ressembler à vos autres informations personnelles.',
'Votre mot de passe doit contenir au minimum 12 caractères.',
]
form = SetPasswordForm(self.u1)
with translation.override('fr'):
html = form.as_p()
for french_text in french_help_texts:
self.assertIn(french_text, html)
class PasswordChangeFormTest(TestDataMixin, TestCase):
def test_incorrect_password(self):
user = User.objects.get(username='testclient')
data = {
'old_password': 'test',
'new_password1': 'abc123',
'new_password2': 'abc123',
}
form = PasswordChangeForm(user, data)
self.assertFalse(form.is_valid())
self.assertEqual(form["old_password"].errors, [str(form.error_messages['password_incorrect'])])
def test_password_verification(self):
# The two new passwords do not match.
user = User.objects.get(username='testclient')
data = {
'old_password': 'password',
'new_password1': 'abc123',
'new_password2': 'abc',
}
form = PasswordChangeForm(user, data)
self.assertFalse(form.is_valid())
self.assertEqual(form["new_password2"].errors, [str(form.error_messages['password_mismatch'])])
@mock.patch('django.contrib.auth.password_validation.password_changed')
def test_success(self, password_changed):
# The success case.
user = User.objects.get(username='testclient')
data = {
'old_password': 'password',
'new_password1': 'abc123',
'new_password2': 'abc123',
}
form = PasswordChangeForm(user, data)
self.assertTrue(form.is_valid())
form.save(commit=False)
self.assertEqual(password_changed.call_count, 0)
form.save()
self.assertEqual(password_changed.call_count, 1)
def test_field_order(self):
# Regression test - check the order of fields:
user = User.objects.get(username='testclient')
self.assertEqual(list(PasswordChangeForm(user, {}).fields), ['old_password', 'new_password1', 'new_password2'])
def test_password_whitespace_not_stripped(self):
user = User.objects.get(username='testclient')
user.set_password(' oldpassword ')
data = {
'old_password': ' oldpassword ',
'new_password1': ' pass ',
'new_password2': ' pass ',
}
form = PasswordChangeForm(user, data)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['old_password'], data['old_password'])
self.assertEqual(form.cleaned_data['new_password1'], data['new_password1'])
self.assertEqual(form.cleaned_data['new_password2'], data['new_password2'])
class UserChangeFormTest(TestDataMixin, TestCase):
def test_username_validity(self):
user = User.objects.get(username='testclient')
data = {'username': 'not valid'}
form = UserChangeForm(data, instance=user)
self.assertFalse(form.is_valid())
validator = next(v for v in User._meta.get_field('username').validators if v.code == 'invalid')
self.assertEqual(form["username"].errors, [str(validator.message)])
def test_bug_14242(self):
        # A regression test, introduced by adding an optimization for the
# UserChangeForm.
class MyUserForm(UserChangeForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['groups'].help_text = 'These groups give users different permissions'
class Meta(UserChangeForm.Meta):
fields = ('groups',)
# Just check we can create it
MyUserForm({})
def test_unusable_password(self):
user = User.objects.get(username='empty_password')
user.set_unusable_password()
user.save()
form = UserChangeForm(instance=user)
self.assertIn(_("No password set."), form.as_table())
def test_bug_17944_empty_password(self):
user = User.objects.get(username='empty_password')
form = UserChangeForm(instance=user)
self.assertIn(_("No password set."), form.as_table())
def test_bug_17944_unmanageable_password(self):
user = User.objects.get(username='unmanageable_password')
form = UserChangeForm(instance=user)
self.assertIn(_("Invalid password format or unknown hashing algorithm."), form.as_table())
def test_bug_17944_unknown_password_algorithm(self):
user = User.objects.get(username='unknown_password')
form = UserChangeForm(instance=user)
self.assertIn(_("Invalid password format or unknown hashing algorithm."), form.as_table())
def test_bug_19133(self):
"The change form does not return the password value"
# Use the form to construct the POST data
user = User.objects.get(username='testclient')
form_for_data = UserChangeForm(instance=user)
post_data = form_for_data.initial
# The password field should be readonly, so anything
# posted here should be ignored; the form will be
# valid, and give back the 'initial' value for the
# password field.
post_data['password'] = 'new password'
form = UserChangeForm(instance=user, data=post_data)
self.assertTrue(form.is_valid())
# original hashed password contains $
self.assertIn('$', form.cleaned_data['password'])
def test_bug_19349_bound_password_field(self):
user = User.objects.get(username='testclient')
form = UserChangeForm(data={}, instance=user)
# When rendering the bound password field,
# ReadOnlyPasswordHashWidget needs the initial
# value to render correctly
self.assertEqual(form.initial['password'], form['password'].value())
def test_custom_form(self):
class CustomUserChangeForm(UserChangeForm):
class Meta(UserChangeForm.Meta):
model = ExtensionUser
fields = ('username', 'password', 'date_of_birth',)
user = User.objects.get(username='testclient')
data = {
'username': 'testclient',
'password': 'testclient',
'date_of_birth': '1998-02-24',
}
form = CustomUserChangeForm(data, instance=user)
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(form.cleaned_data['username'], 'testclient')
self.assertEqual(form.cleaned_data['date_of_birth'], datetime.date(1998, 2, 24))
@override_settings(TEMPLATES=AUTH_TEMPLATES)
class PasswordResetFormTest(TestDataMixin, TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
# This cleanup is necessary because contrib.sites cache
# makes tests interfere with each other, see #11505
Site.objects.clear_cache()
def create_dummy_user(self):
"""
Create a user and return a tuple (user_object, username, email).
"""
username = 'jsmith'
email = '[email protected]'
user = User.objects.create_user(username, email, 'test123')
return (user, username, email)
def test_invalid_email(self):
data = {'email': 'not valid'}
form = PasswordResetForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form['email'].errors, [_('Enter a valid email address.')])
def test_nonexistent_email(self):
"""
Test nonexistent email address. This should not fail because it would
expose information about registered users.
"""
data = {'email': '[email protected]'}
form = PasswordResetForm(data)
self.assertTrue(form.is_valid())
self.assertEqual(len(mail.outbox), 0)
def test_cleaned_data(self):
(user, username, email) = self.create_dummy_user()
data = {'email': email}
form = PasswordResetForm(data)
self.assertTrue(form.is_valid())
form.save(domain_override='example.com')
self.assertEqual(form.cleaned_data['email'], email)
self.assertEqual(len(mail.outbox), 1)
def test_custom_email_subject(self):
data = {'email': '[email protected]'}
form = PasswordResetForm(data)
self.assertTrue(form.is_valid())
# Since we're not providing a request object, we must provide a
# domain_override to prevent the save operation from failing in the
# potential case where contrib.sites is not installed. Refs #16412.
form.save(domain_override='example.com')
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].subject, 'Custom password reset on example.com')
def test_custom_email_constructor(self):
data = {'email': '[email protected]'}
class CustomEmailPasswordResetForm(PasswordResetForm):
def send_mail(self, subject_template_name, email_template_name,
context, from_email, to_email,
html_email_template_name=None):
EmailMultiAlternatives(
"Forgot your password?",
"Sorry to hear you forgot your password.",
None, [to_email],
['[email protected]'],
headers={'Reply-To': '[email protected]'},
alternatives=[
("Really sorry to hear you forgot your password.", "text/html")
],
).send()
form = CustomEmailPasswordResetForm(data)
self.assertTrue(form.is_valid())
# Since we're not providing a request object, we must provide a
# domain_override to prevent the save operation from failing in the
# potential case where contrib.sites is not installed. Refs #16412.
form.save(domain_override='example.com')
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].subject, 'Forgot your password?')
self.assertEqual(mail.outbox[0].bcc, ['[email protected]'])
self.assertEqual(mail.outbox[0].content_subtype, "plain")
def test_preserve_username_case(self):
"""
Preserve the case of the user name (before the @ in the email address)
when creating a user (#5605).
"""
user = User.objects.create_user('forms_test2', '[email protected]', 'test')
self.assertEqual(user.email, '[email protected]')
user = User.objects.create_user('forms_test3', 'tesT', 'test')
self.assertEqual(user.email, 'tesT')
def test_inactive_user(self):
"""
        An inactive user cannot receive a password reset email.
"""
(user, username, email) = self.create_dummy_user()
user.is_active = False
user.save()
form = PasswordResetForm({'email': email})
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(len(mail.outbox), 0)
def test_unusable_password(self):
user = User.objects.create_user('testuser', '[email protected]', 'test')
data = {"email": "[email protected]"}
form = PasswordResetForm(data)
self.assertTrue(form.is_valid())
user.set_unusable_password()
user.save()
form = PasswordResetForm(data)
# The form itself is valid, but no email is sent
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(len(mail.outbox), 0)
def test_save_plaintext_email(self):
"""
Test the PasswordResetForm.save() method with no html_email_template_name
parameter passed in.
Test to ensure original behavior is unchanged after the parameter was added.
"""
(user, username, email) = self.create_dummy_user()
form = PasswordResetForm({"email": email})
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(len(mail.outbox), 1)
message = mail.outbox[0].message()
self.assertFalse(message.is_multipart())
self.assertEqual(message.get_content_type(), 'text/plain')
self.assertEqual(message.get('subject'), 'Custom password reset on example.com')
self.assertEqual(len(mail.outbox[0].alternatives), 0)
self.assertEqual(message.get_all('to'), [email])
self.assertTrue(re.match(r'^http://example.com/reset/[\w+/-]', message.get_payload()))
def test_save_html_email_template_name(self):
"""
        Test the PasswordResetForm.save() method with html_email_template_name
parameter specified.
Test to ensure that a multipart email is sent with both text/plain
and text/html parts.
"""
(user, username, email) = self.create_dummy_user()
form = PasswordResetForm({"email": email})
self.assertTrue(form.is_valid())
form.save(html_email_template_name='registration/html_password_reset_email.html')
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(len(mail.outbox[0].alternatives), 1)
message = mail.outbox[0].message()
self.assertEqual(message.get('subject'), 'Custom password reset on example.com')
self.assertEqual(len(message.get_payload()), 2)
self.assertTrue(message.is_multipart())
self.assertEqual(message.get_payload(0).get_content_type(), 'text/plain')
self.assertEqual(message.get_payload(1).get_content_type(), 'text/html')
self.assertEqual(message.get_all('to'), [email])
self.assertTrue(re.match(r'^http://example.com/reset/[\w/-]+', message.get_payload(0).get_payload()))
self.assertTrue(re.match(
r'^<html><a href="http://example.com/reset/[\w/-]+/">Link</a></html>$',
message.get_payload(1).get_payload()
))
@override_settings(AUTH_USER_MODEL='auth_tests.CustomEmailField')
def test_custom_email_field(self):
email = '[email protected]'
CustomEmailField.objects.create_user('test name', 'test password', email)
form = PasswordResetForm({'email': email})
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(form.cleaned_data['email'], email)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].to, [email])
class ReadOnlyPasswordHashTest(SimpleTestCase):
def test_bug_19349_render_with_none_value(self):
# Rendering the widget with value set to None
# mustn't raise an exception.
widget = ReadOnlyPasswordHashWidget()
html = widget.render(name='password', value=None, attrs={})
self.assertIn(_("No password set."), html)
def test_readonly_field_has_changed(self):
field = ReadOnlyPasswordHashField()
self.assertFalse(field.has_changed('aaa', 'bbb'))
class AdminPasswordChangeFormTest(TestDataMixin, TestCase):
@mock.patch('django.contrib.auth.password_validation.password_changed')
def test_success(self, password_changed):
user = User.objects.get(username='testclient')
data = {
'password1': 'test123',
'password2': 'test123',
}
form = AdminPasswordChangeForm(user, data)
self.assertTrue(form.is_valid())
form.save(commit=False)
self.assertEqual(password_changed.call_count, 0)
form.save()
self.assertEqual(password_changed.call_count, 1)
def test_password_whitespace_not_stripped(self):
user = User.objects.get(username='testclient')
data = {
'password1': ' pass ',
'password2': ' pass ',
}
form = AdminPasswordChangeForm(user, data)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['password1'], data['password1'])
self.assertEqual(form.cleaned_data['password2'], data['password2'])
| bsd-3-clause | 3,040,025,030,073,417,000 | -5,957,071,213,067,693,000 | 38.94289 | 119 | 0.616731 | false |
vipulsabhaya/cue | cue/openstack/common/policy.py | 1 | 28680 | # Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Common Policy Engine Implementation
Policies can be expressed in one of two forms: A list of lists, or a
string written in the new policy language.
In the list-of-lists representation, each check inside the innermost
list is combined as with an "and" conjunction--for that check to pass,
all the specified checks must pass. These innermost lists are then
combined as with an "or" conjunction. This is the original way of
expressing policies, but there now exists a new way: the policy
language.
In the policy language, each check is specified the same way as in the
list-of-lists representation: a simple "a:b" pair that is matched to
the correct code to perform that check. However, conjunction
operators are available, allowing for more expressiveness in crafting
policies.
As an example, take the following rule, expressed in the list-of-lists
representation::
[["role:admin"], ["project_id:%(project_id)s", "role:projectadmin"]]
In the policy language, this becomes::
role:admin or (project_id:%(project_id)s and role:projectadmin)
The policy language also has the "not" operator, allowing a richer
policy rule::
project_id:%(project_id)s and not role:dunce
It is possible to perform policy checks on the following user
attributes (obtained through the token): user_id, domain_id or
project_id::
domain_id:<some_value>
Attributes sent along with API calls can be used by the policy engine
(on the right side of the expression), by using the following syntax::
<some_value>:user.id
Contextual attributes of objects identified by their IDs are loaded
from the database. They are also available to the policy engine and
can be checked through the `target` keyword::
<some_value>:target.role.name
All these attributes (related to users, API calls, and context) can be
checked against each other or against constants, be it literals (True,
<a_number>) or strings.
Finally, two special policy checks should be mentioned; the policy
check "@" will always accept an access, and the policy check "!" will
always reject an access. (Note that if a rule is either the empty
list ("[]") or the empty string, this is equivalent to the "@" policy
check.) Of these, the "!" policy check is probably the most useful,
as it allows particular rules to be explicitly disabled.
"""
import abc
import ast
import os
import re
from oslo.config import cfg
from oslo.serialization import jsonutils
import six
import six.moves.urllib.parse as urlparse
import six.moves.urllib.request as urlrequest
from cue.openstack.common import fileutils
from cue.openstack.common._i18n import _, _LE, _LW
from cue.openstack.common import log as logging
policy_opts = [
cfg.StrOpt('policy_file',
default='policy.json',
help=_('The JSON file that defines policies.')),
cfg.StrOpt('policy_default_rule',
default='default',
help=_('Default rule. Enforced when a requested rule is not '
'found.')),
cfg.MultiStrOpt('policy_dirs',
default=['policy.d'],
help=_('Directories where policy configuration files are '
'stored')),
]
CONF = cfg.CONF
CONF.register_opts(policy_opts)
LOG = logging.getLogger(__name__)
_checks = {}
class PolicyNotAuthorized(Exception):
def __init__(self, rule):
msg = _("Policy doesn't allow %s to be performed.") % rule
super(PolicyNotAuthorized, self).__init__(msg)
class Rules(dict):
"""A store for rules. Handles the default_rule setting directly."""
@classmethod
def load_json(cls, data, default_rule=None):
"""Allow loading of JSON rule data."""
# Suck in the JSON data and parse the rules
rules = dict((k, parse_rule(v)) for k, v in
jsonutils.loads(data).items())
return cls(rules, default_rule)
def __init__(self, rules=None, default_rule=None):
"""Initialize the Rules store."""
super(Rules, self).__init__(rules or {})
self.default_rule = default_rule
def __missing__(self, key):
"""Implements the default rule handling."""
if isinstance(self.default_rule, dict):
raise KeyError(key)
# If the default rule isn't actually defined, do something
# reasonably intelligent
if not self.default_rule:
raise KeyError(key)
if isinstance(self.default_rule, BaseCheck):
return self.default_rule
# We need to check this or we can get infinite recursion
if self.default_rule not in self:
raise KeyError(key)
elif isinstance(self.default_rule, six.string_types):
return self[self.default_rule]
def __str__(self):
"""Dumps a string representation of the rules."""
# Start by building the canonical strings for the rules
out_rules = {}
for key, value in self.items():
# Use empty string for singleton TrueCheck instances
if isinstance(value, TrueCheck):
out_rules[key] = ''
else:
out_rules[key] = str(value)
# Dump a pretty-printed JSON representation
return jsonutils.dumps(out_rules, indent=4)
class Enforcer(object):
"""Responsible for loading and enforcing rules.
:param policy_file: Custom policy file to use, if none is
specified, `CONF.policy_file` will be
used.
:param rules: Default dictionary / Rules to use. It will be
considered just in the first instantiation. If
`load_rules(True)`, `clear()` or `set_rules(True)`
is called this will be overwritten.
:param default_rule: Default rule to use, CONF.default_rule will
be used if none is specified.
:param use_conf: Whether to load rules from cache or config file.
"""
def __init__(self, policy_file=None, rules=None,
default_rule=None, use_conf=True):
self.rules = Rules(rules, default_rule)
self.default_rule = default_rule or CONF.policy_default_rule
self.policy_path = None
self.policy_file = policy_file or CONF.policy_file
self.use_conf = use_conf
def set_rules(self, rules, overwrite=True, use_conf=False):
"""Create a new Rules object based on the provided dict of rules.
:param rules: New rules to use. It should be an instance of dict.
:param overwrite: Whether to overwrite current rules or update them
with the new rules.
:param use_conf: Whether to reload rules from cache or config file.
"""
if not isinstance(rules, dict):
raise TypeError(_("Rules must be an instance of dict or Rules, "
"got %s instead") % type(rules))
self.use_conf = use_conf
if overwrite:
self.rules = Rules(rules, self.default_rule)
else:
self.rules.update(rules)
def clear(self):
"""Clears Enforcer rules, policy's cache and policy's path."""
self.set_rules({})
fileutils.delete_cached_file(self.policy_path)
self.default_rule = None
self.policy_path = None
def load_rules(self, force_reload=False):
"""Loads policy_path's rules.
Policy file is cached and will be reloaded if modified.
:param force_reload: Whether to overwrite current rules.
"""
if force_reload:
self.use_conf = force_reload
if self.use_conf:
if not self.policy_path:
self.policy_path = self._get_policy_path(self.policy_file)
self._load_policy_file(self.policy_path, force_reload)
for path in CONF.policy_dirs:
try:
path = self._get_policy_path(path)
except cfg.ConfigFilesNotFoundError:
LOG.warn(_LW("Can not find policy directories %s"), path)
continue
self._walk_through_policy_directory(path,
self._load_policy_file,
force_reload, False)
def _walk_through_policy_directory(self, path, func, *args):
# We do not iterate over sub-directories.
policy_files = next(os.walk(path))[2]
policy_files.sort()
for policy_file in [p for p in policy_files if not p.startswith('.')]:
func(os.path.join(path, policy_file), *args)
def _load_policy_file(self, path, force_reload, overwrite=True):
reloaded, data = fileutils.read_cached_file(
path, force_reload=force_reload)
if reloaded or not self.rules:
rules = Rules.load_json(data, self.default_rule)
self.set_rules(rules, overwrite)
LOG.debug("Rules successfully reloaded")
def _get_policy_path(self, path):
"""Locate the policy json data file/path.
        :param path: Its value can be a full path or a relative path. When a
                     full path is specified, this function just returns the
                     full path. When a relative path is specified, this
                     function searches the configuration directories to find
                     one that exists.
:returns: The policy path
:raises: ConfigFilesNotFoundError if the file/path couldn't
be located.
"""
policy_path = CONF.find_file(path)
if policy_path:
return policy_path
raise cfg.ConfigFilesNotFoundError((path,))
def enforce(self, rule, target, creds, do_raise=False,
exc=None, *args, **kwargs):
"""Checks authorization of a rule against the target and credentials.
:param rule: A string or BaseCheck instance specifying the rule
to evaluate.
:param target: As much information about the object being operated
on as possible, as a dictionary.
:param creds: As much information about the user performing the
action as possible, as a dictionary.
:param do_raise: Whether to raise an exception or not if check
fails.
:param exc: Class of the exception to raise if the check fails.
Any remaining arguments passed to enforce() (both
positional and keyword arguments) will be passed to
the exception class. If not specified, PolicyNotAuthorized
will be used.
:return: Returns False if the policy does not allow the action and
exc is not provided; otherwise, returns a value that
evaluates to True. Note: for rules using the "case"
expression, this True value will be the specified string
from the expression.
"""
self.load_rules()
# Allow the rule to be a Check tree
if isinstance(rule, BaseCheck):
result = rule(target, creds, self)
elif not self.rules:
# No rules to reference means we're going to fail closed
result = False
else:
try:
# Evaluate the rule
result = self.rules[rule](target, creds, self)
except KeyError:
LOG.debug("Rule [%s] doesn't exist" % rule)
# If the rule doesn't exist, fail closed
result = False
# If it is False, raise the exception if requested
if do_raise and not result:
if exc:
raise exc(*args, **kwargs)
raise PolicyNotAuthorized(rule)
return result
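# Illustrative usage sketch (assumed caller code, not part of this module):
#
#     enforcer = Enforcer()
#     allowed = enforcer.enforce('compute:start',
#                                {'project_id': 'p1'},
#                                {'roles': ['admin'], 'project_id': 'p1'})
#
# enforce() loads the policy file on demand, evaluates the named rule against
# the target and credentials, and fails closed when the rule is unknown.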
@six.add_metaclass(abc.ABCMeta)
class BaseCheck(object):
"""Abstract base class for Check classes."""
@abc.abstractmethod
def __str__(self):
"""String representation of the Check tree rooted at this node."""
pass
@abc.abstractmethod
def __call__(self, target, cred, enforcer):
"""Triggers if instance of the class is called.
Performs the check. Returns False to reject the access or a
true value (not necessary True) to accept the access.
"""
pass
class FalseCheck(BaseCheck):
"""A policy check that always returns False (disallow)."""
def __str__(self):
"""Return a string representation of this check."""
return "!"
def __call__(self, target, cred, enforcer):
"""Check the policy."""
return False
class TrueCheck(BaseCheck):
"""A policy check that always returns True (allow)."""
def __str__(self):
"""Return a string representation of this check."""
return "@"
def __call__(self, target, cred, enforcer):
"""Check the policy."""
return True
class Check(BaseCheck):
"""A base class to allow for user-defined policy checks."""
def __init__(self, kind, match):
"""Initiates Check instance.
:param kind: The kind of the check, i.e., the field before the
':'.
:param match: The match of the check, i.e., the field after
the ':'.
"""
self.kind = kind
self.match = match
def __str__(self):
"""Return a string representation of this check."""
return "%s:%s" % (self.kind, self.match)
class NotCheck(BaseCheck):
"""Implements the "not" logical operator.
A policy check that inverts the result of another policy check.
"""
def __init__(self, rule):
"""Initialize the 'not' check.
:param rule: The rule to negate. Must be a Check.
"""
self.rule = rule
def __str__(self):
"""Return a string representation of this check."""
return "not %s" % self.rule
def __call__(self, target, cred, enforcer):
"""Check the policy.
Returns the logical inverse of the wrapped check.
"""
return not self.rule(target, cred, enforcer)
class AndCheck(BaseCheck):
"""Implements the "and" logical operator.
A policy check that requires that a list of other checks all return True.
"""
def __init__(self, rules):
"""Initialize the 'and' check.
:param rules: A list of rules that will be tested.
"""
self.rules = rules
def __str__(self):
"""Return a string representation of this check."""
return "(%s)" % ' and '.join(str(r) for r in self.rules)
def __call__(self, target, cred, enforcer):
"""Check the policy.
Requires that all rules accept in order to return True.
"""
for rule in self.rules:
if not rule(target, cred, enforcer):
return False
return True
def add_check(self, rule):
"""Adds rule to be tested.
Allows addition of another rule to the list of rules that will
be tested. Returns the AndCheck object for convenience.
"""
self.rules.append(rule)
return self
class OrCheck(BaseCheck):
"""Implements the "or" operator.
A policy check that requires that at least one of a list of other
checks returns True.
"""
def __init__(self, rules):
"""Initialize the 'or' check.
:param rules: A list of rules that will be tested.
"""
self.rules = rules
def __str__(self):
"""Return a string representation of this check."""
return "(%s)" % ' or '.join(str(r) for r in self.rules)
def __call__(self, target, cred, enforcer):
"""Check the policy.
Requires that at least one rule accept in order to return True.
"""
for rule in self.rules:
if rule(target, cred, enforcer):
return True
return False
def add_check(self, rule):
"""Adds rule to be tested.
Allows addition of another rule to the list of rules that will
be tested. Returns the OrCheck object for convenience.
"""
self.rules.append(rule)
return self
def _parse_check(rule):
"""Parse a single base check rule into an appropriate Check object."""
# Handle the special checks
if rule == '!':
return FalseCheck()
elif rule == '@':
return TrueCheck()
try:
kind, match = rule.split(':', 1)
except Exception:
LOG.exception(_LE("Failed to understand rule %s") % rule)
# If the rule is invalid, we'll fail closed
return FalseCheck()
# Find what implements the check
if kind in _checks:
return _checks[kind](kind, match)
elif None in _checks:
return _checks[None](kind, match)
else:
LOG.error(_LE("No handler for matches of kind %s") % kind)
return FalseCheck()
def _parse_list_rule(rule):
"""Translates the old list-of-lists syntax into a tree of Check objects.
Provided for backwards compatibility.
"""
# Empty rule defaults to True
if not rule:
return TrueCheck()
# Outer list is joined by "or"; inner list by "and"
or_list = []
for inner_rule in rule:
# Elide empty inner lists
if not inner_rule:
continue
# Handle bare strings
if isinstance(inner_rule, six.string_types):
inner_rule = [inner_rule]
# Parse the inner rules into Check objects
and_list = [_parse_check(r) for r in inner_rule]
# Append the appropriate check to the or_list
if len(and_list) == 1:
or_list.append(and_list[0])
else:
or_list.append(AndCheck(and_list))
# If we have only one check, omit the "or"
if not or_list:
return FalseCheck()
elif len(or_list) == 1:
return or_list[0]
return OrCheck(or_list)
# Used for tokenizing the policy language
_tokenize_re = re.compile(r'\s+')
def _parse_tokenize(rule):
"""Tokenizer for the policy language.
Most of the single-character tokens are specified in the
_tokenize_re; however, parentheses need to be handled specially,
because they can appear inside a check string. Thankfully, those
parentheses that appear inside a check string can never occur at
the very beginning or end ("%(variable)s" is the correct syntax).
"""
for tok in _tokenize_re.split(rule):
# Skip empty tokens
if not tok or tok.isspace():
continue
# Handle leading parens on the token
clean = tok.lstrip('(')
for i in range(len(tok) - len(clean)):
yield '(', '('
# If it was only parentheses, continue
if not clean:
continue
else:
tok = clean
# Handle trailing parens on the token
clean = tok.rstrip(')')
trail = len(tok) - len(clean)
# Yield the cleaned token
lowered = clean.lower()
if lowered in ('and', 'or', 'not'):
# Special tokens
yield lowered, clean
elif clean:
# Not a special token, but not composed solely of ')'
if len(tok) >= 2 and ((tok[0], tok[-1]) in
[('"', '"'), ("'", "'")]):
# It's a quoted string
yield 'string', tok[1:-1]
else:
yield 'check', _parse_check(clean)
# Yield the trailing parens
for i in range(trail):
yield ')', ')'
class ParseStateMeta(type):
"""Metaclass for the ParseState class.
Facilitates identifying reduction methods.
"""
def __new__(mcs, name, bases, cls_dict):
"""Create the class.
Injects the 'reducers' list, a list of tuples matching token sequences
to the names of the corresponding reduction methods.
"""
reducers = []
for key, value in cls_dict.items():
if not hasattr(value, 'reducers'):
continue
for reduction in value.reducers:
reducers.append((reduction, key))
cls_dict['reducers'] = reducers
return super(ParseStateMeta, mcs).__new__(mcs, name, bases, cls_dict)
def reducer(*tokens):
"""Decorator for reduction methods.
Arguments are a sequence of tokens, in order, which should trigger running
this reduction method.
"""
def decorator(func):
# Make sure we have a list of reducer sequences
if not hasattr(func, 'reducers'):
func.reducers = []
# Add the tokens to the list of reducer sequences
func.reducers.append(list(tokens))
return func
return decorator
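# Illustrative effect (not part of the original module): decorating a method
# with @reducer('check', 'and', 'check') records ['check', 'and', 'check'] on
# the method, and ParseStateMeta copies it into the class's reducers list, so
# ParseState.reduce() invokes that method whenever the last three tokens on
# the stack match the sequence.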
@six.add_metaclass(ParseStateMeta)
class ParseState(object):
"""Implement the core of parsing the policy language.
Uses a greedy reduction algorithm to reduce a sequence of tokens into
a single terminal, the value of which will be the root of the Check tree.
Note: error reporting is rather lacking. The best we can get with
this parser formulation is an overall "parse failed" error.
Fortunately, the policy language is simple enough that this
shouldn't be that big a problem.
"""
def __init__(self):
"""Initialize the ParseState."""
self.tokens = []
self.values = []
def reduce(self):
"""Perform a greedy reduction of the token stream.
If a reducer method matches, it will be executed, then the
reduce() method will be called recursively to search for any more
possible reductions.
"""
for reduction, methname in self.reducers:
if (len(self.tokens) >= len(reduction) and
self.tokens[-len(reduction):] == reduction):
# Get the reduction method
meth = getattr(self, methname)
# Reduce the token stream
results = meth(*self.values[-len(reduction):])
# Update the tokens and values
self.tokens[-len(reduction):] = [r[0] for r in results]
self.values[-len(reduction):] = [r[1] for r in results]
# Check for any more reductions
return self.reduce()
def shift(self, tok, value):
"""Adds one more token to the state. Calls reduce()."""
self.tokens.append(tok)
self.values.append(value)
# Do a greedy reduce...
self.reduce()
@property
def result(self):
"""Obtain the final result of the parse.
Raises ValueError if the parse failed to reduce to a single result.
"""
if len(self.values) != 1:
raise ValueError("Could not parse rule")
return self.values[0]
@reducer('(', 'check', ')')
@reducer('(', 'and_expr', ')')
@reducer('(', 'or_expr', ')')
def _wrap_check(self, _p1, check, _p2):
"""Turn parenthesized expressions into a 'check' token."""
return [('check', check)]
@reducer('check', 'and', 'check')
def _make_and_expr(self, check1, _and, check2):
"""Create an 'and_expr'.
Join two checks by the 'and' operator.
"""
return [('and_expr', AndCheck([check1, check2]))]
@reducer('and_expr', 'and', 'check')
def _extend_and_expr(self, and_expr, _and, check):
"""Extend an 'and_expr' by adding one more check."""
return [('and_expr', and_expr.add_check(check))]
@reducer('check', 'or', 'check')
def _make_or_expr(self, check1, _or, check2):
"""Create an 'or_expr'.
Join two checks by the 'or' operator.
"""
return [('or_expr', OrCheck([check1, check2]))]
@reducer('or_expr', 'or', 'check')
def _extend_or_expr(self, or_expr, _or, check):
"""Extend an 'or_expr' by adding one more check."""
return [('or_expr', or_expr.add_check(check))]
@reducer('not', 'check')
def _make_not_expr(self, _not, check):
"""Invert the result of another check."""
return [('check', NotCheck(check))]
def _parse_text_rule(rule):
"""Parses policy to the tree.
Translates a policy written in the policy language into a tree of
Check objects.
"""
# Empty rule means always accept
if not rule:
return TrueCheck()
# Parse the token stream
state = ParseState()
for tok, value in _parse_tokenize(rule):
state.shift(tok, value)
try:
return state.result
except ValueError:
# Couldn't parse the rule
LOG.exception(_LE("Failed to understand rule %s") % rule)
# Fail closed
return FalseCheck()
def parse_rule(rule):
"""Parses a policy rule into a tree of Check objects."""
# If the rule is a string, it's in the policy language
if isinstance(rule, six.string_types):
return _parse_text_rule(rule)
return _parse_list_rule(rule)
def register(name, func=None):
"""Register a function or Check class as a policy check.
:param name: Gives the name of the check type, e.g., 'rule',
'role', etc. If name is None, a default check type
will be registered.
:param func: If given, provides the function or class to register.
If not given, returns a function taking one argument
to specify the function or class to register,
allowing use as a decorator.
"""
# Perform the actual decoration by registering the function or
# class. Returns the function or class for compliance with the
# decorator interface.
def decorator(func):
_checks[name] = func
return func
# If the function or class is given, do the registration
if func:
return decorator(func)
return decorator
@register("rule")
class RuleCheck(Check):
def __call__(self, target, creds, enforcer):
"""Recursively checks credentials based on the defined rules."""
try:
return enforcer.rules[self.match](target, creds, enforcer)
except KeyError:
# We don't have any matching rule; fail closed
return False
@register("role")
class RoleCheck(Check):
def __call__(self, target, creds, enforcer):
"""Check that there is a matching role in the cred dict."""
return self.match.lower() in [x.lower() for x in creds['roles']]
@register('http')
class HttpCheck(Check):
def __call__(self, target, creds, enforcer):
"""Check http: rules by calling to a remote server.
This example implementation simply verifies that the response
is exactly 'True'.
"""
url = ('http:' + self.match) % target
data = {'target': jsonutils.dumps(target),
'credentials': jsonutils.dumps(creds)}
post_data = urlparse.urlencode(data)
f = urlrequest.urlopen(url, post_data)
return f.read() == "True"
@register(None)
class GenericCheck(Check):
def __call__(self, target, creds, enforcer):
"""Check an individual match.
Matches look like:
tenant:%(tenant_id)s
role:compute:admin
True:%(user.enabled)s
'Member':%(role.name)s
"""
try:
match = self.match % target
except KeyError:
            # While doing GenericCheck, if the key is not present in the
            # target, return False.
return False
try:
# Try to interpret self.kind as a literal
leftval = ast.literal_eval(self.kind)
except ValueError:
try:
kind_parts = self.kind.split('.')
leftval = creds
for kind_part in kind_parts:
leftval = leftval[kind_part]
except KeyError:
return False
return match == six.text_type(leftval)
| apache-2.0 | -3,627,906,700,228,967,000 | 7,673,191,998,545,419,000 | 30.038961 | 78 | 0.598326 | false |
JioCloud/nova_test_latest | nova/pci/whitelist.py | 35 | 4103 | # Copyright (c) 2013 Intel, Inc.
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from nova import exception
from nova.i18n import _
from nova.pci import devspec
pci_opts = [cfg.MultiStrOpt('pci_passthrough_whitelist',
default=[],
help='White list of PCI devices available to VMs. '
'For example: pci_passthrough_whitelist = '
'[{"vendor_id": "8086", "product_id": "0443"}]'
)
]
CONF = cfg.CONF
CONF.register_opts(pci_opts)
LOG = logging.getLogger(__name__)
class PciHostDevicesWhiteList(object):
"""White list class to decide assignable pci devices.
    Not all devices on a compute node can be assigned to a guest; the
    cloud administrator decides which devices can be assigned based on
    vendor_id, product_id, etc. If no white list is specified, no device
    will be assignable.
"""
def _parse_white_list_from_config(self, whitelists):
"""Parse and validate the pci whitelist from the nova config."""
specs = []
for jsonspec in whitelists:
try:
dev_spec = jsonutils.loads(jsonspec)
except ValueError:
raise exception.PciConfigInvalidWhitelist(
reason=_("Invalid entry: '%s'") % jsonspec)
if isinstance(dev_spec, dict):
dev_spec = [dev_spec]
elif not isinstance(dev_spec, list):
raise exception.PciConfigInvalidWhitelist(
reason=_("Invalid entry: '%s'; "
"Expecting list or dict") % jsonspec)
for ds in dev_spec:
if not isinstance(ds, dict):
raise exception.PciConfigInvalidWhitelist(
reason=_("Invalid entry: '%s'; "
"Expecting dict") % ds)
spec = devspec.PciDeviceSpec(ds)
specs.append(spec)
return specs
def __init__(self, whitelist_spec=None):
"""White list constructor
For example, followed json string specifies that devices whose
vendor_id is '8086' and product_id is '1520' can be assigned
to guest.
'[{"product_id":"1520", "vendor_id":"8086"}]'
:param whitelist_spec: A json string for a list of dictionaries,
each dictionary specifies the pci device
properties requirement.
"""
super(PciHostDevicesWhiteList, self).__init__()
if whitelist_spec:
self.specs = self._parse_white_list_from_config(whitelist_spec)
else:
self.specs = []
def device_assignable(self, dev):
"""Check if a device can be assigned to a guest.
:param dev: A dictionary describing the device properties
"""
for spec in self.specs:
if spec.match(dev):
return spec
def get_devspec(self, pci_dev):
for spec in self.specs:
if spec.match_pci_obj(pci_dev):
return spec
def get_pci_devices_filter():
return PciHostDevicesWhiteList(CONF.pci_passthrough_whitelist)
def get_pci_device_devspec(pci_dev):
dev_filter = get_pci_devices_filter()
return dev_filter.get_devspec(pci_dev)
| apache-2.0 | -8,288,692,504,394,842,000 | -6,614,250,446,292,562,000 | 34.991228 | 79 | 0.591762 | false |
cloudera/hue | desktop/core/ext-py/dnspython-1.15.0/tests/test_rdtypeanyeui.py | 4 | 9292 | # Copyright (C) 2015 Red Hat, Inc.
# Author: Petr Spacek <[email protected]>
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED 'AS IS' AND RED HAT DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
try:
import unittest2 as unittest
except ImportError:
import unittest
from io import BytesIO
import dns.rrset
import dns.rdtypes.ANY.EUI48
import dns.rdtypes.ANY.EUI64
import dns.exception
class RdtypeAnyEUI48TestCase(unittest.TestCase):
def testInstOk(self):
'''Valid binary input.'''
eui = b'\x01\x23\x45\x67\x89\xab'
inst = dns.rdtypes.ANY.EUI48.EUI48(dns.rdataclass.IN,
dns.rdatatype.EUI48,
eui)
self.assertEqual(inst.eui, eui)
def testInstLength(self):
'''Incorrect input length.'''
eui = b'\x01\x23\x45\x67\x89\xab\xcd'
with self.assertRaises(dns.exception.FormError):
dns.rdtypes.ANY.EUI48.EUI48(dns.rdataclass.IN,
dns.rdatatype.EUI48,
eui)
def testFromTextOk(self):
'''Valid text input.'''
r1 = dns.rrset.from_text('foo', 300, 'IN', 'EUI48',
'01-23-45-67-89-ab')
eui = b'\x01\x23\x45\x67\x89\xab'
self.assertEqual(r1[0].eui, eui)
def testFromTextLength(self):
'''Invalid input length.'''
with self.assertRaises(dns.exception.SyntaxError):
dns.rrset.from_text('foo', 300, 'IN', 'EUI48',
'00-01-23-45-67-89-ab')
def testFromTextDelim(self):
'''Invalid delimiter.'''
with self.assertRaises(dns.exception.SyntaxError):
dns.rrset.from_text('foo', 300, 'IN', 'EUI48', '01_23-45-67-89-ab')
def testFromTextExtraDash(self):
'''Extra dash instead of hex digit.'''
with self.assertRaises(dns.exception.SyntaxError):
dns.rrset.from_text('foo', 300, 'IN', 'EUI48', '0--23-45-67-89-ab')
def testFromTextMultipleTokens(self):
'''Invalid input divided to multiple tokens.'''
with self.assertRaises(dns.exception.SyntaxError):
dns.rrset.from_text('foo', 300, 'IN', 'EUI48', '01 23-45-67-89-ab')
def testFromTextInvalidHex(self):
'''Invalid hexadecimal input.'''
with self.assertRaises(dns.exception.SyntaxError):
dns.rrset.from_text('foo', 300, 'IN', 'EUI48', 'g0-23-45-67-89-ab')
def testToTextOk(self):
'''Valid text output.'''
eui = b'\x01\x23\x45\x67\x89\xab'
exp_text = '01-23-45-67-89-ab'
inst = dns.rdtypes.ANY.EUI48.EUI48(dns.rdataclass.IN,
dns.rdatatype.EUI48,
eui)
text = inst.to_text()
self.assertEqual(exp_text, text)
def testToWire(self):
'''Valid wire format.'''
eui = b'\x01\x23\x45\x67\x89\xab'
inst = dns.rdtypes.ANY.EUI48.EUI48(dns.rdataclass.IN,
dns.rdatatype.EUI48,
eui)
buff = BytesIO()
inst.to_wire(buff)
self.assertEqual(buff.getvalue(), eui)
def testFromWireOk(self):
'''Valid wire format.'''
eui = b'\x01\x23\x45\x67\x89\xab'
pad_len = 100
wire = dns.wiredata.WireData(b'x' * pad_len + eui + b'y' * pad_len * 2)
inst = dns.rdtypes.ANY.EUI48.EUI48.from_wire(dns.rdataclass.IN,
dns.rdatatype.EUI48,
wire,
pad_len,
len(eui))
self.assertEqual(inst.eui, eui)
def testFromWireLength(self):
'''Valid wire format.'''
eui = b'\x01\x23\x45\x67\x89'
pad_len = 100
wire = dns.wiredata.WireData(b'x' * pad_len + eui + b'y' * pad_len * 2)
with self.assertRaises(dns.exception.FormError):
dns.rdtypes.ANY.EUI48.EUI48.from_wire(dns.rdataclass.IN,
dns.rdatatype.EUI48,
wire,
pad_len,
len(eui))
class RdtypeAnyEUI64TestCase(unittest.TestCase):
def testInstOk(self):
'''Valid binary input.'''
eui = b'\x01\x23\x45\x67\x89\xab\xcd\xef'
inst = dns.rdtypes.ANY.EUI64.EUI64(dns.rdataclass.IN,
dns.rdatatype.EUI64,
eui)
self.assertEqual(inst.eui, eui)
def testInstLength(self):
'''Incorrect input length.'''
eui = b'\x01\x23\x45\x67\x89\xab'
with self.assertRaises(dns.exception.FormError):
dns.rdtypes.ANY.EUI64.EUI64(dns.rdataclass.IN,
dns.rdatatype.EUI64,
eui)
def testFromTextOk(self):
'''Valid text input.'''
r1 = dns.rrset.from_text('foo', 300, 'IN', 'EUI64',
'01-23-45-67-89-ab-cd-ef')
eui = b'\x01\x23\x45\x67\x89\xab\xcd\xef'
self.assertEqual(r1[0].eui, eui)
def testFromTextLength(self):
'''Invalid input length.'''
with self.assertRaises(dns.exception.SyntaxError):
dns.rrset.from_text('foo', 300, 'IN', 'EUI64',
'01-23-45-67-89-ab')
def testFromTextDelim(self):
'''Invalid delimiter.'''
with self.assertRaises(dns.exception.SyntaxError):
dns.rrset.from_text('foo', 300, 'IN', 'EUI64',
'01_23-45-67-89-ab-cd-ef')
def testFromTextExtraDash(self):
'''Extra dash instead of hex digit.'''
with self.assertRaises(dns.exception.SyntaxError):
dns.rrset.from_text('foo', 300, 'IN', 'EUI64',
'0--23-45-67-89-ab-cd-ef')
def testFromTextMultipleTokens(self):
'''Invalid input divided to multiple tokens.'''
with self.assertRaises(dns.exception.SyntaxError):
dns.rrset.from_text('foo', 300, 'IN', 'EUI64',
'01 23-45-67-89-ab-cd-ef')
def testFromTextInvalidHex(self):
'''Invalid hexadecimal input.'''
with self.assertRaises(dns.exception.SyntaxError):
dns.rrset.from_text('foo', 300, 'IN', 'EUI64',
'g0-23-45-67-89-ab-cd-ef')
def testToTextOk(self):
'''Valid text output.'''
eui = b'\x01\x23\x45\x67\x89\xab\xcd\xef'
exp_text = '01-23-45-67-89-ab-cd-ef'
inst = dns.rdtypes.ANY.EUI64.EUI64(dns.rdataclass.IN,
dns.rdatatype.EUI64,
eui)
text = inst.to_text()
self.assertEqual(exp_text, text)
def testToWire(self):
'''Valid wire format.'''
eui = b'\x01\x23\x45\x67\x89\xab\xcd\xef'
inst = dns.rdtypes.ANY.EUI64.EUI64(dns.rdataclass.IN,
dns.rdatatype.EUI64,
eui)
buff = BytesIO()
inst.to_wire(buff)
self.assertEqual(buff.getvalue(), eui)
def testFromWireOk(self):
'''Valid wire format.'''
eui = b'\x01\x23\x45\x67\x89\xab\xcd\xef'
pad_len = 100
wire = dns.wiredata.WireData(b'x' * pad_len + eui + b'y' * pad_len * 2)
inst = dns.rdtypes.ANY.EUI64.EUI64.from_wire(dns.rdataclass.IN,
dns.rdatatype.EUI64,
wire,
pad_len,
len(eui))
self.assertEqual(inst.eui, eui)
def testFromWireLength(self):
        '''Incorrect wire format length.'''
eui = b'\x01\x23\x45\x67\x89'
pad_len = 100
wire = dns.wiredata.WireData(b'x' * pad_len + eui + b'y' * pad_len * 2)
with self.assertRaises(dns.exception.FormError):
dns.rdtypes.ANY.EUI64.EUI64.from_wire(dns.rdataclass.IN,
dns.rdatatype.EUI64,
wire,
pad_len,
len(eui))
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 6,004,967,236,658,225,000 | -922,478,612,916,101,100 | 40.482143 | 79 | 0.512484 | false |
mgraupe/acq4 | acq4/filetypes/ImageFile.py | 4 | 5498 | # -*- coding: utf-8 -*-
from PIL import Image
## Install support for 16-bit images in PIL
if Image.VERSION == '1.1.7':
Image._MODE_CONV["I;16"] = ('%su2' % Image._ENDIAN, None)
Image._fromarray_typemap[((1, 1), "<u2")] = ("I", "I;16")
if Image.VERSION == '1.1.6':
Image._MODE_CONV["I;16"] = ('%su2' % Image._ENDIAN, None)
## just a copy of fromarray() from Image.py with I;16 added in
def fromarray(obj, mode=None):
arr = obj.__array_interface__
shape = arr['shape']
ndim = len(shape)
try:
strides = arr['strides']
except KeyError:
strides = None
if mode is None:
typestr = arr['typestr']
if not (typestr[0] == '|' or typestr[0] == Image._ENDIAN or
typestr[1:] not in ['u1', 'b1', 'i4', 'f4']):
raise TypeError("cannot handle data-type")
if typestr[0] == Image._ENDIAN:
typestr = typestr[1:3]
else:
typestr = typestr[:2]
if typestr == 'i4':
mode = 'I'
if typestr == 'u2':
mode = 'I;16'
elif typestr == 'f4':
mode = 'F'
elif typestr == 'b1':
mode = '1'
elif ndim == 2:
mode = 'L'
elif ndim == 3:
mode = 'RGB'
elif ndim == 4:
mode = 'RGBA'
else:
raise TypeError("Do not understand data.")
ndmax = 4
bad_dims=0
if mode in ['1','L','I','P','F']:
ndmax = 2
elif mode == 'RGB':
ndmax = 3
if ndim > ndmax:
raise ValueError("Too many dimensions.")
size = shape[:2][::-1]
if strides is not None:
obj = obj.tostring()
        return Image.frombuffer(mode, size, obj, "raw", mode, 0, 1)
Image.fromarray=fromarray
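## Illustrative sketch (not part of the original module): with the patched
## fromarray() above, 16-bit arrays map to PIL's 'I;16' mode. The shape and
## values below are arbitrary example data.
#   import numpy as np
#   frame = (np.arange(64, dtype=np.uint16) * 1000).reshape(8, 8)
#   img16 = Image.fromarray(frame)   # uses the patched function on PIL 1.1.6
#   assert img16.mode == 'I;16'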
#import png ## better png support than PIL
from numpy import array, ndarray
from acq4.util.metaarray import MetaArray as MA
from FileType import *
#import libtiff
#from PyQt4 import QtCore, QtGui
class Array(ndarray): ## just allows us to add some dynamic attributes
def __new__(cls, arr):
return arr.view(cls)
class ImageFile(FileType):
extensions = ['.png', '.tif', '.jpg'] ## list of extensions handled by this class
dataTypes = [MA, ndarray] ## list of python types handled by this class
priority = 50 ## medium priority; MetaArray should be used for writing arrays if possible;
@classmethod
def write(cls, data, dirHandle, fileName, **args):
"""Write data to fileName.
Return the file name written (this allows the function to modify the requested file name)
"""
fileName = cls.addExtension(fileName)
ext = os.path.splitext(fileName)[1].lower()[1:]
img = Image.fromarray(data.transpose())
img.save(os.path.join(dirHandle.name(), fileName))
#if ext in ['tif', 'tiff']:
#d = data.transpose()
#tiff = libtiff.TIFFimage(d, description='')
#tiff.write_file(os.path.join(dirHandle.name(), fileName), compression='none')
#else:
#ims = data.tostring()
#img = QtGui.QImage(buffer(ims), data.shape[1], data.shape[0], QtGui.QImage.Format_ARGB32)
#w = QtGui.QImageWriter(os.path.join(dirHandle.name(), fileName), ext)
#w.write(img)
return fileName
@classmethod
def read(cls, fileHandle):
"""Read a file, return a data object"""
img = Image.open(fileHandle.name())
arr = array(img)
if arr.ndim == 0:
raise Exception("Image has no data. Either 1) this is not a valid image or 2) PIL does not support this image type.")
#ext = os.path.splitext(fileHandle.name())[1].lower()[1:]
#if ext in ['tif', 'tiff']:
#tif = libtiff.TIFFfile(fileHandle.name())
#samples, sample_names = tif.get_samples()
#if len(samples) != 1:
#arr = np.concatenate(samples)
#else:
#arr = samples[0]
#else:
#img = QtGui.QImage()
#img.load(fileHandle.name())
#ptr = img.bits()
#ptr.setsize(img.byteCount())
#buf = buffer(ptr, 0, img.byteCount())
#arr = np.frombuffer(buf, dtype=np.ubyte)
#arr.shape = (img.height(), img.width(), img.depth() / 8)
transp = range(arr.ndim) ## switch axis order y,x to x,y
if arr.ndim == 2:
transp[0] = 1
transp[1] = 0
axisHint = ['x', 'y']
elif arr.ndim == 3:
if len(img.getbands()) > 1:
transp[0] = 1
transp[1] = 0
axisHint = ['x', 'y']
else:
transp[1] = 2
transp[2] = 1
axisHint = ['t', 'x', 'y']
elif arr.ndim == 4:
transp[1] = 2
transp[2] = 1
axisHint = ['t', 'x', 'y']
else:
raise Exception("Bad image size: %s" % str(arr.ndim))
#print arr.shape
arr = arr.transpose(tuple(transp))
axisHint.append(img.getbands())
arr = Array(arr) ## allow addition of new attributes
        arr.axisHint = axisHint
#print arr.shape
return arr
| mit | -7,572,687,360,759,518,000 | 652,654,401,030,003,000 | 34.24359 | 129 | 0.502001 | false |
valentin-krasontovitsch/ansible | lib/ansible/plugins/action/nxos.py | 12 | 7412 | #
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import copy
import re
import sys
from ansible import constants as C
from ansible.module_utils._text import to_text
from ansible.module_utils.connection import Connection
from ansible.plugins.action.network import ActionModule as ActionNetworkModule
from ansible.module_utils.network.common.utils import load_provider
from ansible.module_utils.network.nxos.nxos import nxos_provider_spec
from ansible.utils.display import Display
display = Display()
class ActionModule(ActionNetworkModule):
def run(self, tmp=None, task_vars=None):
del tmp # tmp no longer has any effect
self._config_module = True if self._task.action == 'nxos_config' else False
socket_path = None
if (self._play_context.connection == 'httpapi' or self._task.args.get('provider', {}).get('transport') == 'nxapi') \
and self._task.action in ('nxos_file_copy', 'nxos_nxapi'):
return {'failed': True, 'msg': "Transport type 'nxapi' is not valid for '%s' module." % (self._task.action)}
if self._task.action == 'nxos_file_copy':
self._task.args['host'] = self._play_context.remote_addr
self._task.args['password'] = self._play_context.password
if self._play_context.connection == 'network_cli':
self._task.args['username'] = self._play_context.remote_user
elif self._play_context.connection == 'local':
self._task.args['username'] = self._play_context.connection_user
if self._task.action == 'nxos_install_os':
connection = self._connection
if connection.get_option('persistent_command_timeout') < 600 or connection.get_option('persistent_connect_timeout') < 600:
msg = 'PERSISTENT_COMMAND_TIMEOUT and PERSISTENT_CONNECT_TIMEOUT'
msg += ' must be set to 600 seconds or higher when using nxos_install_os module'
return {'failed': True, 'msg': msg}
if self._play_context.connection in ('network_cli', 'httpapi'):
provider = self._task.args.get('provider', {})
if any(provider.values()):
display.warning('provider is unnecessary when using %s and will be ignored' % self._play_context.connection)
del self._task.args['provider']
if self._task.args.get('transport'):
display.warning('transport is unnecessary when using %s and will be ignored' % self._play_context.connection)
del self._task.args['transport']
elif self._play_context.connection == 'local':
provider = load_provider(nxos_provider_spec, self._task.args)
transport = provider['transport'] or 'cli'
display.vvvv('connection transport is %s' % transport, self._play_context.remote_addr)
if transport == 'cli':
pc = copy.deepcopy(self._play_context)
pc.connection = 'network_cli'
pc.network_os = 'nxos'
pc.remote_addr = provider['host'] or self._play_context.remote_addr
pc.port = int(provider['port'] or self._play_context.port or 22)
pc.remote_user = provider['username'] or self._play_context.connection_user
pc.password = provider['password'] or self._play_context.password
pc.private_key_file = provider['ssh_keyfile'] or self._play_context.private_key_file
pc.become = provider['authorize'] or False
if pc.become:
pc.become_method = 'enable'
pc.become_pass = provider['auth_pass']
display.vvv('using connection plugin %s (was local)' % pc.connection, pc.remote_addr)
connection = self._shared_loader_obj.connection_loader.get('persistent', pc, sys.stdin)
command_timeout = int(provider['timeout']) if provider['timeout'] else connection.get_option('persistent_command_timeout')
connection.set_options(direct={'persistent_command_timeout': command_timeout})
socket_path = connection.run()
display.vvvv('socket_path: %s' % socket_path, pc.remote_addr)
if not socket_path:
return {'failed': True,
'msg': 'unable to open shell. Please see: ' +
'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell'}
task_vars['ansible_socket'] = socket_path
else:
self._task.args['provider'] = ActionModule.nxapi_implementation(provider, self._play_context)
else:
return {'failed': True, 'msg': 'Connection type %s is not valid for this module' % self._play_context.connection}
if (self._play_context.connection == 'local' and transport == 'cli') or self._play_context.connection == 'network_cli':
# make sure we are in the right cli context which should be
# enable mode and not config module
if socket_path is None:
socket_path = self._connection.socket_path
conn = Connection(socket_path)
# Match prompts ending in )# except those with (maint-mode)#
config_prompt = re.compile(r'^.*\((?!maint-mode).*\)#$')
out = conn.get_prompt()
while config_prompt.match(to_text(out, errors='surrogate_then_replace').strip()):
display.vvvv('wrong context, sending exit to device', self._play_context.remote_addr)
conn.send_command('exit')
out = conn.get_prompt()
result = super(ActionModule, self).run(task_vars=task_vars)
return result
@staticmethod
def nxapi_implementation(provider, play_context):
provider['transport'] = 'nxapi'
if provider.get('host') is None:
provider['host'] = play_context.remote_addr
if provider.get('port') is None:
if provider.get('use_ssl'):
provider['port'] = 443
else:
provider['port'] = 80
if provider.get('timeout') is None:
provider['timeout'] = C.PERSISTENT_COMMAND_TIMEOUT
if provider.get('username') is None:
provider['username'] = play_context.connection_user
if provider.get('password') is None:
provider['password'] = play_context.password
if provider.get('use_ssl') is None:
provider['use_ssl'] = False
if provider.get('validate_certs') is None:
provider['validate_certs'] = True
return provider
| gpl-3.0 | -5,236,706,355,036,258,000 | 4,036,367,588,036,096,000 | 45.911392 | 138 | 0.618187 | false |
ubc/edx-platform | common/djangoapps/course_action_state/migrations/0002_add_rerun_display_name.py | 129 | 5409 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'CourseRerunState.display_name'
db.add_column('course_action_state_coursererunstate', 'display_name',
self.gf('django.db.models.fields.CharField')(default='', max_length=255, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'CourseRerunState.display_name'
db.delete_column('course_action_state_coursererunstate', 'display_name')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'course_action_state.coursererunstate': {
'Meta': {'unique_together': "(('course_key', 'action'),)", 'object_name': 'CourseRerunState'},
'action': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'}),
'course_key': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'created_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'created_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_by_user+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}),
'display_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'should_display': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'source_course_key': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'updated_time': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'updated_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'updated_by_user+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"})
}
}
complete_apps = ['course_action_state']
| agpl-3.0 | 4,259,492,256,524,879,000 | -2,271,790,652,700,497,200 | 70.171053 | 193 | 0.564984 | false |
faribas/RMG-Py | rmgpy/quantity.py | 4 | 28300 | #!/usr/bin/env python
# encoding: utf-8
################################################################################
#
# RMG - Reaction Mechanism Generator
#
# Copyright (c) 2009-2011 by the RMG Team ([email protected])
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the 'Software'),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
################################################################################
"""
This module contains classes and methods for working with physical quantities,
particularly the :class:`Quantity` class for representing physical quantities.
"""
import numpy
import quantities as pq
import rmgpy.constants as constants
################################################################################
# Explicitly set the default units to SI
pq.set_default_units('si')
# These units are not defined by the quantities package, but occur frequently
# in data handled by RMG, so we define them manually
pq.UnitQuantity('kilocalories', pq.cal*1e3, symbol='kcal')
pq.UnitQuantity('kilojoules', pq.J*1e3, symbol='kJ')
pq.UnitQuantity('kilomoles', pq.mol*1e3, symbol='kmol')
pq.UnitQuantity('molecule', pq.mol/6.02214179e23, symbol='molecule')
pq.UnitQuantity('molecules', pq.mol/6.02214179e23, symbol='molecules')
pq.UnitQuantity('debye', 1.0/(constants.c*1e21)*pq.C*pq.m, symbol='De')
################################################################################
class QuantityError(Exception):
"""
An exception to be raised when an error occurs while working with physical
quantities in RMG. Pass a string describing the circumstances of the
exceptional behavior.
"""
pass
################################################################################
class Units(object):
"""
The :class:`Units` class provides a representation of the units of a
physical quantity. The attributes are:
=================== ========================================================
Attribute Description
=================== ========================================================
`units` A string representation of the units
=================== ========================================================
Functions that return the conversion factors to and from SI units are
provided.
"""
# A dict of conversion factors (to SI) for each of the frequent units
# Here we also define that cm^-1 is not to be converted to m^-1 (or Hz, J, K, etc.)
conversionFactors = {'cm^-1': 1.0}
def __init__(self, units=''):
self.units = units
def getConversionFactorToSI(self):
"""
Return the conversion factor for converting a quantity in a given set
        of `units` to the SI equivalent units.
"""
try:
# Process several common units manually for speed
factor = Units.conversionFactors[self.units]
except KeyError:
# Fall back to (slow!) quantities package for less common units
factor = float(pq.Quantity(1.0, self.units).simplified)
# Cache the conversion factor so we don't ever need to use
# quantities to compute it again
Units.conversionFactors[self.units] = factor
return factor
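    # Illustrative sketch (not part of the original class): repeated conversions
    # hit the cache built above, e.g.
    #   Units('kcal/mol').getConversionFactorToSI()   # ~4184.0 (J/mol per kcal/mol)
    #   Units('cm^-1').getConversionFactorToSI()      # 1.0 by design (see above)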
def getConversionFactorFromSI(self):
"""
Return the conversion factor for converting a quantity to a given set
of `units` from the SI equivalent units.
"""
return 1.0 / self.getConversionFactorToSI()
################################################################################
class ScalarQuantity(Units):
"""
The :class:`ScalarQuantity` class provides a representation of a scalar
physical quantity, with optional units and uncertainty information. The
attributes are:
=================== ========================================================
Attribute Description
=================== ========================================================
`value` The numeric value of the quantity in the given units
`units` The units the value was specified in
`uncertainty` The numeric uncertainty in the value
`uncertaintyType` The type of uncertainty: ``'+|-'`` for additive, ``'*|/'`` for multiplicative
`value_si` The numeric value of the quantity in the corresponding SI units
=================== ========================================================
It is often more convenient to perform computations using SI units instead
of the given units of the quantity. For this reason, the SI equivalent of
the `value` attribute can be directly accessed using the `value_si`
attribute. This value is cached on the :class:`ScalarQuantity` object for
speed.
"""
def __init__(self, value, units='', uncertainty=None, uncertaintyType='+|-'):
Units.__init__(self, units)
self.value = value
self.uncertaintyType = uncertaintyType
self.uncertainty = float(uncertainty) if uncertainty is not None else 0.0
def __reduce__(self):
"""
Return a tuple of information used to pickle the scalar quantity.
"""
return (ScalarQuantity, (self.value, self.units, self.uncertainty, self.uncertaintyType))
def __str__(self):
"""
Return a string representation of the scalar quantity.
"""
result = '{0:g}'.format(self.value)
if self.uncertainty != 0.0:
result += ' {0} {1:g}'.format(self.uncertaintyType, self.uncertainty)
if self.units != '':
result += ' {0}'.format(self.units)
return result
def __repr__(self):
"""
Return a string representation that can be used to reconstruct the
scalar quantity.
"""
if self.units == '' and self.uncertainty == 0.0:
return '{0:g}'.format(self.value)
else:
result = '({0:g},{1!r}'.format(self.value, self.units)
if self.uncertainty != 0.0:
result += ',{0!r},{1:g}'.format(self.uncertaintyType, self.uncertainty)
result += ')'
return result
def copy(self):
"""
Return a copy of the quantity.
"""
return ScalarQuantity(self.value, self.units, self.uncertainty, self.uncertaintyType)
def getValue(self):
"""
The numeric value of the quantity, in the given units
"""
return self.value_si * self.getConversionFactorFromSI()
def setValue(self, v):
self.value_si = float(v) * self.getConversionFactorToSI()
value = property(getValue, setValue)
def getUncertainty(self):
"""
The numeric value of the uncertainty, in the given units if additive, or no units if multiplicative.
"""
if self.isUncertaintyAdditive():
return self.uncertainty_si * self.getConversionFactorFromSI()
else:
return self.uncertainty_si
def setUncertainty(self, v):
if self.isUncertaintyAdditive():
self.uncertainty_si = float(v) * self.getConversionFactorToSI()
else:
self.uncertainty_si = float(v)
uncertainty = property(getUncertainty, setUncertainty)
def getUncertaintyType(self):
"""
The type of uncertainty: ``'+|-'`` for additive, ``'*|/'`` for multiplicative
"""
return self._uncertaintyType
def setUncertaintyType(self, v):
"""
Check the uncertainty type is valid, then set it, and set the uncertainty to -1.
If you set the uncertainty then change the type, we have no idea what to do with
the units. This ensures you set the type first.
"""
if v not in ['+|-','*|/']:
raise QuantityError("Invalid uncertainty type")
self._uncertaintyType = v
self.uncertainty_si = -1
uncertaintyType = property(getUncertaintyType, setUncertaintyType)
def equals(self, quantity):
"""
        Return ``True`` if everything in a quantity object matches
        the parameters in this object. If there are lists of values or
        uncertainties, each item in the list must match and be in the same
        order. Otherwise, return ``False``.
        (Originally intended to warn if the units differed only in
        capitalization; however, the Quantity object only parses units that
        match in case, so this is not a problem.)
"""
def approx_equal(x, y, atol = .01):
"""
Returns true if two float/double values are approximately equal
            within a relative error of 1% or under a user-specified absolute tolerance.
"""
return abs(x-y) <= 1e-2*abs(x) or abs(x-y) <= 1e-2*abs(y) or abs(x-y) <= atol
if isinstance(quantity, ScalarQuantity):
if (self.uncertaintyType == quantity.uncertaintyType and
approx_equal(self.uncertainty * self.getConversionFactorToSI(), quantity.uncertainty * quantity.getConversionFactorToSI()) and
self.units == quantity.units):
if self.units == "kcal/mol":
# set absolute tolerance to .01 kcal/mol = 42 J/mol
atol = 42
else:
# for other units, set it to .01
atol = .01
if not approx_equal(self.value_si, quantity.value_si, atol):
return False
return True
return False
def isUncertaintyAdditive(self):
"""
Return ``True`` if the uncertainty is specified in additive format
and ``False`` otherwise.
"""
return self._uncertaintyType == '+|-'
def isUncertaintyMultiplicative(self):
"""
Return ``True`` if the uncertainty is specified in multiplicative
format and ``False`` otherwise.
"""
return self._uncertaintyType == '*|/'
################################################################################
class ArrayQuantity(Units):
"""
    The :class:`ArrayQuantity` class provides a representation of an array of
physical quantity values, with optional units and uncertainty information.
The attributes are:
=================== ========================================================
Attribute Description
=================== ========================================================
`value` The numeric value of the quantity in the given units
`units` The units the value was specified in
`uncertainty` The numeric uncertainty in the value
`uncertaintyType` The type of uncertainty: ``'+|-'`` for additive, ``'*|/'`` for multiplicative
`value_si` The numeric value of the quantity in the corresponding SI units
=================== ========================================================
It is often more convenient to perform computations using SI units instead
of the given units of the quantity. For this reason, the SI equivalent of
the `value` attribute can be directly accessed using the `value_si`
attribute. This value is cached on the :class:`ArrayQuantity` object for
speed.
"""
def __init__(self, value, units='', uncertainty=None, uncertaintyType='+|-'):
Units.__init__(self, units)
self.value = value
self.uncertaintyType = uncertaintyType
if uncertainty is None:
self.uncertainty = numpy.zeros_like(self.value)
elif isinstance(uncertainty, (int,float)):
self.uncertainty = numpy.ones_like(self.value) * uncertainty
else:
uncertainty = numpy.array(uncertainty)
if uncertainty.ndim != self.value.ndim:
raise QuantityError('The given uncertainty has {0:d} dimensions, while the given value has {1:d} dimensions.'.format(uncertainty.ndim, self.value.ndim))
for i in range(self.value.ndim):
if self.value.shape[i] != uncertainty.shape[i]:
raise QuantityError('Dimension {0:d} has {1:d} elements for the given value, but {2:d} elements for the given uncertainty.'.format(i, self.value.shape[i], uncertainty.shape[i]))
else:
self.uncertainty = uncertainty
def __reduce__(self):
"""
Return a tuple of information used to pickle the array quantity.
"""
return (ArrayQuantity, (self.value, self.units, self.uncertainty, self.uncertaintyType))
def __str__(self):
"""
Return a string representation of the array quantity.
"""
if self.value.ndim == 1:
value = '[{0}]'.format(','.join(['{0:g}'.format(float(v)) for v in self.value]))
elif self.value.ndim == 2:
value = []
for i in range(self.value.shape[0]):
value.append('[{0}]'.format(','.join(['{0:g}'.format(float(self.value[i,j])) for j in range(self.value.shape[1])])))
value = '[{0}]'.format(','.join(value))
if self.uncertainty.ndim == 1:
uncertainty = '[{0}]'.format(','.join(['{0:g}'.format(float(v)) for v in self.uncertainty]))
elif self.uncertainty.ndim == 2:
uncertainty = []
for i in range(self.uncertainty.shape[0]):
uncertainty.append('[{0}]'.format(','.join(['{0:g}'.format(float(self.uncertainty[i,j])) for j in range(self.uncertainty.shape[1])])))
uncertainty = '[{0}]'.format(','.join(uncertainty))
result = '{0}'.format(value)
if any(self.uncertainty != 0.0):
result += ' {0} {1}'.format(self.uncertaintyType, uncertainty)
if self.units != '':
result += ' {0}'.format(self.units)
return result
def __repr__(self):
"""
Return a string representation that can be used to reconstruct the
array quantity.
"""
if self.value.ndim == 1:
value = '[{0}]'.format(','.join(['{0:g}'.format(float(v)) for v in self.value]))
elif self.value.ndim == 2:
value = []
for i in range(self.value.shape[0]):
value.append('[{0}]'.format(','.join(['{0:g}'.format(float(self.value[i,j])) for j in range(self.value.shape[1])])))
value = '[{0}]'.format(','.join(value))
if self.uncertainty.ndim == 1:
uncertainty = '[{0}]'.format(','.join(['{0:g}'.format(float(v)) for v in self.uncertainty]))
elif self.uncertainty.ndim == 2:
uncertainty = []
for i in range(self.uncertainty.shape[0]):
uncertainty.append('[{0}]'.format(','.join(['{0:g}'.format(float(self.uncertainty[i,j])) for j in range(self.uncertainty.shape[1])])))
uncertainty = '[{0}]'.format(','.join(uncertainty))
if self.units == '' and not numpy.any(self.uncertainty != 0.0):
return '{0}'.format(value)
else:
result = '({0},{1!r}'.format(value, self.units)
if numpy.any(self.uncertainty != 0.0):
result += ',{0!r},{1}'.format(self.uncertaintyType, uncertainty)
result += ')'
return result
def copy(self):
"""
Return a copy of the quantity.
"""
return ArrayQuantity(self.value.copy(), self.units, self.uncertainty.copy(), self.uncertaintyType)
def getValue(self):
return self.value_si * self.getConversionFactorFromSI()
def setValue(self, v):
self.value_si = numpy.array(v) * self.getConversionFactorToSI()
value = property(getValue, setValue)
def equals(self, quantity):
"""
        Return ``True`` if everything in a quantity object matches
        the parameters in this object. If there are lists of values or
        uncertainties, each item in the list must match and be in the same
        order. Otherwise, return ``False``.
        (Originally intended to warn if the units differed only in
        capitalization; however, the Quantity object only parses units that
        match in case, so this is not a problem.)
"""
def approx_equal(x, y, atol = .01):
"""
Returns true if two float/double values are approximately equal
            within a relative error of 1% or under a user-specified absolute tolerance.
"""
return abs(x-y) <= 1e-2*abs(x) or abs(x-y) <= 1e-2*abs(y) or abs(x-y) <= atol
if isinstance(quantity, ArrayQuantity):
if (self.uncertaintyType == quantity.uncertaintyType and self.units == quantity.units):
if self.units == "kcal/mol":
# set absolute tolerance to .01 kcal/mol = 42 J/mol
atol = 42
else:
# for other units, set it to .01
atol = .01
if self.value.ndim != quantity.value.ndim:
return False
for i in range(self.value.ndim):
if self.value.shape[i] != quantity.value.shape[i]:
return False
for v1, v2 in zip(self.value.flat, quantity.value.flat):
if not approx_equal(v1, v2, atol):
return False
if self.uncertainty.ndim != quantity.uncertainty.ndim:
return False
for i in range(self.uncertainty.ndim):
if self.uncertainty.shape[i] != quantity.uncertainty.shape[i]:
return False
for v1, v2 in zip(self.uncertainty.flat, quantity.uncertainty.flat):
if not approx_equal(v1, v2, atol):
return False
return True
return False
def isUncertaintyAdditive(self):
"""
Return ``True`` if the uncertainty is specified in additive format
and ``False`` otherwise.
"""
return self.uncertaintyType == '+|-'
def isUncertaintyMultiplicative(self):
"""
Return ``True`` if the uncertainty is specified in multiplicative
format and ``False`` otherwise.
"""
return self.uncertaintyType == '*|/'
################################################################################
def Quantity(*args, **kwargs):
"""
Create a :class:`ScalarQuantity` or :class:`ArrayQuantity` object for a
given physical quantity. The physical quantity can be specified in several
ways:
* A scalar-like or array-like value (for a dimensionless quantity)
* An array of arguments (including keyword arguments) giving some or all of
the `value`, `units`, `uncertainty`, and/or `uncertaintyType`.
* A tuple of the form ``(value,)``, ``(value,units)``,
``(value,units,uncertainty)``, or
``(value,units,uncertaintyType,uncertainty)``
* An existing :class:`ScalarQuantity` or :class:`ArrayQuantity` object, for
which a copy is made
"""
# Initialize attributes
value = None
units = ''
uncertaintyType = '+|-'
uncertainty = None
if len(args) == 1 and len(kwargs) == 0 and args[0] is None:
return None
# Unpack args if necessary
if isinstance(args, tuple) and len(args) == 1 and isinstance(args[0], tuple):
args = args[0]
# Process args
Nargs = len(args)
if Nargs == 1 and isinstance(args[0], (ScalarQuantity,ArrayQuantity)):
# We were given another quantity object, so make a (shallow) copy of it
other = args[0]
value = other.value
units = other.units
uncertaintyType = other.uncertaintyType
uncertainty = other.uncertainty
elif Nargs == 1:
# If one parameter is given, it should be a single value
value, = args
elif Nargs == 2:
# If two parameters are given, it should be a value and units
value, units = args
elif Nargs == 3:
# If three parameters are given, it should be a value, units and uncertainty
value, units, uncertainty = args
elif Nargs == 4:
# If four parameters are given, it should be a value, units, uncertainty type, and uncertainty
value, units, uncertaintyType, uncertainty = args
elif Nargs != 0:
raise QuantityError('Invalid parameters {0!r} passed to ArrayQuantity.__init__() method.'.format(args))
# Process kwargs
for k, v in kwargs.items():
if k == 'value':
if len(args) >= 1:
raise QuantityError('Multiple values for argument {0} passed to ArrayQuantity.__init__() method.'.format(k))
else:
value = v
elif k == 'units':
if len(args) >= 2:
raise QuantityError('Multiple values for argument {0} passed to ArrayQuantity.__init__() method.'.format(k))
else:
units = v
elif k == 'uncertainty':
if len(args) >= 3:
raise QuantityError('Multiple values for argument {0} passed to ArrayQuantity.__init__() method.'.format(k))
else:
uncertainty = v
elif k == 'uncertaintyType':
if len(args) >= 4:
raise QuantityError('Multiple values for argument {0} passed to ArrayQuantity.__init__() method.'.format(k))
else:
uncertaintyType = v
else:
raise QuantityError('Invalid keyword argument {0} passed to ArrayQuantity.__init__() method.'.format(k))
# Process units and uncertainty type parameters
if uncertaintyType not in ['+|-', '*|/']:
raise QuantityError('Unexpected uncertainty type "{0}"; valid values are "+|-" and "*|/".'.format(uncertaintyType))
if isinstance(value, (list,tuple,numpy.ndarray)):
return ArrayQuantity(value, units, uncertainty, uncertaintyType)
try:
value = float(value)
except TypeError:
return ArrayQuantity(value, units, uncertainty, uncertaintyType)
uncertainty = 0.0 if uncertainty is None else float(uncertainty)
return ScalarQuantity(value, units, uncertainty, uncertaintyType)
################################################################################
class UnitType:
"""
The :class:`UnitType` class represents a factory for producing
:class:`ScalarQuantity` or :class:`ArrayQuantity` objects of a given unit
type, e.g. time, volume, etc.
"""
def __init__(self, units, commonUnits=None, extraDimensionality=None):
self.units = units
self.dimensionality = pq.Quantity(1.0, units).simplified.dimensionality
self.commonUnits = commonUnits or []
self.extraDimensionality = {}
if extraDimensionality:
for unit, factor in extraDimensionality.items():
self.extraDimensionality[pq.Quantity(1.0, unit).simplified.dimensionality] = factor
def __call__(self, *args, **kwargs):
# Make a ScalarQuantity or ArrayQuantity object out of the given parameter
quantity = Quantity(*args, **kwargs)
if quantity is None:
return quantity
units = quantity.units
# If the units are in the common units, then we can do the conversion
# very quickly and avoid the slow calls to the quantities package
if units == self.units or units in self.commonUnits:
return quantity
# Check that the units are consistent with this unit type
# This uses the quantities package (slow!)
units = pq.Quantity(1.0, units)
dimensionality = units.simplified.dimensionality
if dimensionality == self.dimensionality:
pass
elif dimensionality in self.extraDimensionality:
quantity.value_si *= self.extraDimensionality[dimensionality]
quantity.units = self.units
else:
raise QuantityError('Invalid units {0!r}.'.format(quantity.units))
        # Return the Quantity or ArrayQuantity object
return quantity
Acceleration = UnitType('m/s^2')
Area = UnitType('m^2')
Concentration = UnitType('mol/m^3')
Dimensionless = UnitType('')
DipoleMoment = UnitType('C*m', extraDimensionality={
'De': 1.0 / (1.0e21 * constants.c),
})
Energy = Enthalpy = FreeEnergy = UnitType('J/mol', commonUnits=['kJ/mol', 'cal/mol', 'kcal/mol'])
Entropy = HeatCapacity = UnitType('J/(mol*K)', commonUnits=['kJ/(mol*K)', 'cal/(mol*K)', 'kcal/(mol*K)'])
Flux = UnitType('mol/(m^2*s)')
Frequency = UnitType('cm^-1', extraDimensionality={
's^-1': 1.0 / (constants.c * 100.),
'Hz': 1.0 / (constants.c * 100.),
'J': 1.0 / (constants.h * constants.c * 100.),
'K': constants.kB / (constants.h * constants.c * 100.),
})
Force = UnitType('N')
Inertia = UnitType('kg*m^2')
Length = UnitType('m')
Mass = UnitType('amu', extraDimensionality={'kg/mol': 1000.*constants.amu})
Momentum = UnitType('kg*m/s^2')
Power = UnitType('W')
Pressure = UnitType('Pa', commonUnits=['bar', 'atm', 'torr', 'psi', 'mbar'])
Temperature = UnitType('K', commonUnits=['degC', 'degF', 'degR'])
Time = UnitType('s')
Velocity = UnitType('m/s')
Volume = UnitType('m^3')
# Polarizability = UnitType('C*m^2*V^-1')
"""
What's called Polarizability in the transport properties is in fact a polarizability volume,
which is related to the polarizability by a factor of $4*\pi*\epsilon_0$, where $\epsilon_0$ is the permittivity of free space.
Rather than mess around with conversions, I suggest we just use "Volume" as the units for
what we call 'polarizability'. Chemkin expects it in Angstrom^3. We'll store it in m^3.
"""
# RateCoefficient is handled as a special case since it can take various
# units depending on the reaction order
RATECOEFFICIENT_CONVERSION_FACTORS = {
(1.0/pq.s).dimensionality: 1.0,
(pq.m**3/pq.s).dimensionality: 1.0,
(pq.m**6/pq.s).dimensionality: 1.0,
(pq.m**9/pq.s).dimensionality: 1.0,
(pq.m**3/(pq.mol*pq.s)).dimensionality: 1.0,
(pq.m**6/(pq.mol**2*pq.s)).dimensionality: 1.0,
(pq.m**9/(pq.mol**3*pq.s)).dimensionality: 1.0,
}
RATECOEFFICIENT_COMMON_UNITS = ['s^-1', 'm^3/(mol*s)', 'cm^3/(mol*s)', 'm^3/(molecule*s)', 'cm^3/(molecule*s)']
def RateCoefficient(*args, **kwargs):
# Make a ScalarQuantity or ArrayQuantity object out of the given parameter
quantity = Quantity(*args, **kwargs)
if quantity is None:
return quantity
units = quantity.units
# If the units are in the common units, then we can do the conversion
# very quickly and avoid the slow calls to the quantities package
if units in RATECOEFFICIENT_COMMON_UNITS:
return quantity
dimensionality = pq.Quantity(1.0, quantity.units).simplified.dimensionality
try:
factor = RATECOEFFICIENT_CONVERSION_FACTORS[dimensionality]
quantity.value_si *= factor
except KeyError:
raise QuantityError('Invalid units {0!r}.'.format(quantity.units))
    # Return the Quantity or ArrayQuantity object
return quantity
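# Illustrative sketch (not from the original source): the accepted units depend
# on the reaction order, e.g.
#   RateCoefficient(2.5e12, 'cm^3/(mol*s)')   # valid second-order units
#   RateCoefficient(1.0e13, 's^-1')           # valid first-order units
#   RateCoefficient(1.0, 'J')                 # raises QuantityError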
| mit | 9,025,523,001,271,691,000 | -305,335,256,639,862,000 | 40.133721 | 197 | 0.579894 | false |
SnappleCap/oh-mainline | vendor/packages/Django/tests/regressiontests/templates/parser.py | 58 | 3218 | """
Testing some internals of the template processing. These are *not* examples to be copied in user code.
"""
from __future__ import unicode_literals
from django.template import (TokenParser, FilterExpression, Parser, Variable,
TemplateSyntaxError)
from django.utils.unittest import TestCase
class ParserTests(TestCase):
def test_token_parsing(self):
# Tests for TokenParser behavior in the face of quoted strings with
# spaces.
p = TokenParser("tag thevar|filter sometag")
self.assertEqual(p.tagname, "tag")
self.assertEqual(p.value(), "thevar|filter")
self.assertTrue(p.more())
self.assertEqual(p.tag(), "sometag")
self.assertFalse(p.more())
p = TokenParser('tag "a value"|filter sometag')
self.assertEqual(p.tagname, "tag")
self.assertEqual(p.value(), '"a value"|filter')
self.assertTrue(p.more())
self.assertEqual(p.tag(), "sometag")
self.assertFalse(p.more())
p = TokenParser("tag 'a value'|filter sometag")
self.assertEqual(p.tagname, "tag")
self.assertEqual(p.value(), "'a value'|filter")
self.assertTrue(p.more())
self.assertEqual(p.tag(), "sometag")
self.assertFalse(p.more())
def test_filter_parsing(self):
c = {"article": {"section": "News"}}
p = Parser("")
def fe_test(s, val):
self.assertEqual(FilterExpression(s, p).resolve(c), val)
fe_test("article.section", "News")
fe_test("article.section|upper", "NEWS")
fe_test('"News"', "News")
fe_test("'News'", "News")
fe_test(r'"Some \"Good\" News"', 'Some "Good" News')
fe_test(r'"Some \"Good\" News"', 'Some "Good" News')
fe_test(r"'Some \'Bad\' News'", "Some 'Bad' News")
fe = FilterExpression(r'"Some \"Good\" News"', p)
self.assertEqual(fe.filters, [])
self.assertEqual(fe.var, 'Some "Good" News')
# Filtered variables should reject access of attributes beginning with
# underscores.
self.assertRaises(TemplateSyntaxError,
FilterExpression, "article._hidden|upper", p
)
def test_variable_parsing(self):
c = {"article": {"section": "News"}}
self.assertEqual(Variable("article.section").resolve(c), "News")
self.assertEqual(Variable('"News"').resolve(c), "News")
self.assertEqual(Variable("'News'").resolve(c), "News")
# Translated strings are handled correctly.
self.assertEqual(Variable("_(article.section)").resolve(c), "News")
self.assertEqual(Variable('_("Good News")').resolve(c), "Good News")
self.assertEqual(Variable("_('Better News')").resolve(c), "Better News")
# Escaped quotes work correctly as well.
self.assertEqual(
Variable(r'"Some \"Good\" News"').resolve(c), 'Some "Good" News'
)
self.assertEqual(
Variable(r"'Some \'Better\' News'").resolve(c), "Some 'Better' News"
)
# Variables should reject access of attributes beginning with
# underscores.
self.assertRaises(TemplateSyntaxError,
Variable, "article._hidden"
)
| agpl-3.0 | 6,670,557,994,964,236,000 | -7,340,438,015,047,227,000 | 36.858824 | 102 | 0.604723 | false |
paulmathews/nova | nova/api/openstack/compute/contrib/server_diagnostics.py | 19 | 2505 | # Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob.exc
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import compute
from nova import exception
authorize = extensions.extension_authorizer('compute', 'server_diagnostics')
sd_nsmap = {None: wsgi.XMLNS_V11}
class ServerDiagnosticsTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('diagnostics')
elem = xmlutil.SubTemplateElement(root, xmlutil.Selector(0),
selector=xmlutil.get_items)
elem.text = 1
return xmlutil.MasterTemplate(root, 1, nsmap=sd_nsmap)
class ServerDiagnosticsController(object):
@wsgi.serializers(xml=ServerDiagnosticsTemplate)
def index(self, req, server_id):
context = req.environ["nova.context"]
authorize(context)
compute_api = compute.API()
try:
instance = compute_api.get(context, server_id)
        except exception.NotFound:
raise webob.exc.HTTPNotFound(_("Instance not found"))
return compute_api.get_diagnostics(context, instance)
class Server_diagnostics(extensions.ExtensionDescriptor):
"""Allow Admins to view server diagnostics through server action"""
name = "ServerDiagnostics"
alias = "os-server-diagnostics"
namespace = ("http://docs.openstack.org/compute/ext/"
"server-diagnostics/api/v1.1")
updated = "2011-12-21T00:00:00+00:00"
def get_resources(self):
parent_def = {'member_name': 'server', 'collection_name': 'servers'}
#NOTE(bcwaldon): This should be prefixed with 'os-'
ext = extensions.ResourceExtension('diagnostics',
ServerDiagnosticsController(),
parent=parent_def)
return [ext]
| apache-2.0 | -8,241,044,240,906,971,000 | 236,797,687,318,025,600 | 36.38806 | 78 | 0.66986 | false |
forivall/tacoterm | mscript.py | 3 | 29478 | #!/usr/bin/env python
import errno, os, re, sys, time
from maitch import *
ctx = Context(PACKAGE = "roxterm", SRC_DIR = "${TOP_DIR}/src",
MCFLAGS = "${CPPFLAGS} -I. -I${SRC_DIR} -D_GNU_SOURCE -DHAVE_CONFIG_H")
MINILIB_SOURCES = "colourscheme.c dlg.c display.c dragrcv.c dynopts.c " \
"encodings.c globalopts.c logo.c options.c optsfile.c rtdbus.c"
ROXTERM_CONFIG_SOURCES = "capplet.c colourgui.c configlet.c getname.c " \
"optsdbus.c profilegui.c shortcuts.c"
ROXTERM_SOURCES = "about.c main.c multitab.c multitab-close-button.c " \
"multitab-label.c menutree.c optsdbus.c roxterm.c search.c " \
"shortcuts.c uri.c x11support.c"
ROXTERM_HTML_BASENAMES = "guide index installation news".split()
LOGO_PNG = "${TOP_DIR}/Help/lib/roxterm_logo.png"
FAVICON = "${TOP_DIR}/Help/lib/favicon.ico"
TEXT_LOGO = "${TOP_DIR}/Help/lib/logo_text.png"
APPINFO = "${TOP_DIR}/AppInfo.xml"
VFILE = "${TOP_DIR}/version"
if ctx.mode == 'configure' or ctx.mode == 'help':
ctx.arg_disable('gtk-native-tab-dragging',
"Use ROXTerm's legacy code for dragging tabs "
"instead of GTK+'s functions")
ctx.arg_with('gnome-default-applications',
"Where to install GNOME Default Applications file",
default = None)
ctx.arg_disable('sm', "Don't enable session management")
ctx.arg_disable('nls', "Disable all translations",
default = None)
ctx.arg_disable('translations',
"Disable all translations (same as --disable-nls)", default = None)
ctx.arg_disable('git', "Assume this is a release tarball: "
"don't attempt to generate changelogs, pixmaps etc")
ctx.arg_enable("rox-locales",
"Make symlinks so ROX app can load translations")
if ctx.mode == 'configure':
ctx.find_prog_env("sed")
try:
ctx.find_prog_env("gpg")
except MaitchNotFoundError:
mprint("gpg not found, not signing tarball")
ctx.setenv('SIGN_DIST', False)
else:
ctx.setenv('SIGN_DIST', True)
vfile = ctx.subst(VFILE)
if ctx.env['ENABLE_GIT'] != False:
git = os.path.exists(ctx.subst(opj("${TOP_DIR}", ".git")))
try:
ctx.find_prog_env("git")
except MaitchNotFoundError:
git = False
else:
git = False
if git:
# Might have an obsolete version.h from earlier build
ctx.delete("${SRC_DIR}/version.h")
version = ctx.prog_output(["${GIT}", "describe",
"--match", "[0-9]*"])[0].strip()
version = version.replace('-', '.', 1).replace('-', '~', 1)
ctx.save_if_different(vfile, version + '\n')
gitlog = ctx.prog_output(["/bin/sh", os.path.abspath(
ctx.subst("${TOP_DIR}/genlog"))])[0].lstrip()
# Can't use ctx.save_if_different because it tries to subst
# content and fails
save_if_different(ctx.subst("${TOP_DIR}/ChangeLog"), gitlog)
else:
fp = open(vfile, 'r')
version = fp.read().strip()
fp.close()
ctx.setenv('VERSION', version)
try:
ctx.find_prog_env("xsltproc", "XMLTOMAN")
except MaitchNotFoundError:
mprint("Unable to build manpages without xsltproc", file = sys.stderr)
ctx.setenv("XMLTOMAN", "")
else:
ctx.setenv("XMLTOMAN_OPTS", "-o ${TGT} --nonet --novalid " \
"--param man.charmap.use.subset 0 " \
"http://docbook.sourceforge.net/release/xsl/" \
"current/manpages/docbook.xsl")
ctx.setenv("XMLTOMAN_OUTPUT", "")
if ctx.env['ENABLE_GIT'] != False:
try:
ctx.find_prog_env("convert")
ctx.find_prog_env("composite")
try:
ctx.find_prog_env("rsvg-convert")
except:
ctx.find_prog_env("rsvg")
except:
mprint("WARNING: ImageMagick and/or rsvg binaries appear " \
"not to be installed.\n" \
"This will cause errors later unless the generated " \
"pixmaps are already present,\neg supplied with a " \
"release tarball.",
file = sys.stderr)
ctx.setenv("CONVERT", "")
ctx.setenv("COMPOSITE", "")
else:
ctx.setenv("CONVERT", "")
ctx.setenv("COMPOSITE", "")
ctx.setenv('BUG_TRACKER', "http://sourceforge.net/tracker/?group_id=124080")
trans = ctx.env['ENABLE_TRANSLATIONS'] and ctx.env['ENABLE_NLS']
if trans != False:
try:
ctx.find_prog_env("xgettext")
ctx.find_prog_env("msgcat")
ctx.find_prog_env("msgmerge")
ctx.find_prog_env("msgfmt")
except MaitchNotFoundError:
if trans == True:
raise
else:
mprint("WARNING: Translation tools not found, not building " \
" programs' translations", file = sys.stderr)
ctx.setenv('HAVE_GETTEXT', False)
else:
ctx.setenv('HAVE_GETTEXT', True)
else:
ctx.setenv('HAVE_GETTEXT', False)
if trans != False:
try:
ctx.find_prog_env("po4a-gettextize")
ctx.find_prog_env("po4a-updatepo")
ctx.find_prog_env("po4a-translate")
except MaitchNotFoundError:
if trans == True:
raise
else:
mprint("WARNING: po4a tools not found, not building " \
"documentation's translations", file = sys.stderr)
ctx.setenv('HAVE_PO4A', False)
else:
ctx.setenv('HAVE_PO4A', True)
ctx.setenv('PO4ACHARSET', "-M UTF-8")
ctx.setenv('PO4AOPTS', "${PO4ACHARSET} --package-name=${PACKAGE} " \
"--package-version=${VERSION} " \
"--copyright-holder='Tony Houghton' " \
"--msgid-bugs-address=${BUG_TRACKER}")
ctx.setenv('PO4ADIR', "${ABS_TOP_DIR}/po4a")
ctx.setenv('PO4ABUILDDIR', "${ABS_BUILD_DIR}/po4a")
else:
ctx.setenv('HAVE_PO4A', False)
if trans != False and ctx.env['HAVE_GETTEXT']:
try:
ctx.find_prog_env("itstool")
except MaitchNotFoundError:
if trans == True:
raise
else:
mprint("WARNING: itstool not found, not building " \
"AppData file's translations", file = sys.stderr)
ctx.setenv('HAVE_ITSTOOL', False)
else:
ctx.setenv('HAVE_ITSTOOL', True)
ctx.setenv('POXML_DIR', "${ABS_TOP_DIR}/poxml")
ctx.setenv('POXML_BUILD_DIR', "${ABS_BUILD_DIR}/poxml")
ctx.setenv('APPDATA_ITS', "${POXML_DIR}/appdata.its")
else:
ctx.setenv('HAVE_ITSTOOL', False)
gda = ctx.env.get("WITH_GNOME_DEFAULT_APPLICATIONS")
if gda == None or gda == True:
try:
gda = ctx.prog_output("${PKG_CONFIG} "
"--variable=defappsdir gnome-default-applications")
except MaitchChildError:
if gda == True:
raise
else:
gda = ""
elif gda == False:
gda = ""
ctx.setenv("WITH_GNOME_DEFAULT_APPLICATIONS", gda)
ctx.pkg_config('gtk+-3.0', 'GTK', '3.10')
ctx.pkg_config('vte-2.91', 'VTE')
vte_version = ctx.prog_output("${PKG_CONFIG} --modversion vte-2.91")
ctx.setenv('NEED_TRANSPARENCY_FIX', vte_version >= "0.34.8")
sm = ctx.env['ENABLE_SM']
if sm != False:
try:
ctx.pkg_config('sm ice', 'SM')
except MaitchChildError:
if sm == True:
raise
sm = False
else:
sm = True
ctx.define('ENABLE_SM', sm)
ctx.pkg_config('dbus-1', 'DBUS', '1.0')
ctx.pkg_config('dbus-glib-1', 'DBUS', '0.22')
ctx.pkg_config('gmodule-export-2.0', 'GMODULE')
ctx.pkg_config('x11')
for f in ["get_current_dir_name"]:
ctx.check_func(f, "${CFLAGS} ${MCFLAGS} ${LIBS}")
#for f in "get_current_dir_name g_mkdir_with_parents " \
# "gdk_window_get_display gdk_window_get_screen " \
# "gtk_widget_get_realized gtk_widget_get_mapped " \
# "gtk_combo_box_text_new gtk_rc_style_unref " \
# "gtk_drag_begin_with_coordinates".split():
# ctx.check_func(f, "${CFLAGS} ${MCFLAGS} ${GTK_CFLAGS}",
# "${LIBS} ${GTK_LIBS}")
for f in ["vte_terminal_set_word_chars",
"vte_terminal_set_background_tint_color"]:
ctx.check_func(f, "${CFLAGS} ${MCFLAGS} ${VTE_CFLAGS}",
"${LIBS} ${VTE_LIBS}")
ctx.setenv('CORE_CFLAGS',
"${CFLAGS} ${MCFLAGS} ${GTK_CFLAGS} ${DBUS_CFLAGS}")
ctx.setenv('CORE_LIBS',
"${LIBS} ${GTK_LIBS} ${DBUS_LIBS}")
ctx.setenv('ROXTERM_CFLAGS',
"${CFLAGS} ${MCFLAGS} ${VTE_CFLAGS} ${SM_CFLAGS} ${DBUS_CFLAGS}")
ctx.setenv('ROXTERM_LIBS',
"${LIBS} ${VTE_LIBS} ${SM_LIBS} ${DBUS_LIBS} ${X11_LIBS}")
ctx.setenv('ROXTERM_CONFIG_CFLAGS',
"${CFLAGS} ${MCFLAGS} ${GTK_CFLAGS} ${DBUS_CFLAGS} " \
"${GMODULE_CFLAGS} -DROXTERM_CAPPLET")
ctx.setenv('ROXTERM_CONFIG_LIBS',
"${LIBS} ${GTK_LIBS} ${DBUS_LIBS} ${GMODULE_LIBS}")
ctx.define_from_var('PACKAGE')
ctx.define('DO_OWN_TAB_DRAGGING',
ctx.env.get('ENABLE_GTK_NATIVE_TAB_DRAGGING', True))
ctx.define('SYS_CONF_DIR', ctx.env['SYSCONFDIR'])
ctx.define('DATA_DIR', ctx.env['DATADIR'])
ctx.define('PKG_DATA_DIR', opj(ctx.env['DATADIR'], "roxterm"))
ctx.define('ICON_DIR',
opj(ctx.env['DATADIR'], "icons", "hicolor", "scalable", "apps"))
ctx.define('HTML_DIR', ctx.env['HTMLDIR'])
ctx.setenv('htmldir', "${HTMLDIR}")
ctx.define('BIN_DIR', ctx.env['BINDIR'])
if ctx.env['HAVE_GETTEXT']:
ctx.define('ENABLE_NLS', 1)
else:
ctx.define('ENABLE_NLS', None)
ctx.define_from_var('LOCALEDIR')
ctx.define_from_var('NEED_TRANSPARENCY_FIX')
ctx.subst_file("${TOP_DIR}/roxterm.1.xml.in",
"${BUILD_DIR}/roxterm.1.xml", True)
ctx.subst_file("${TOP_DIR}/roxterm-config.1.xml.in",
"${BUILD_DIR}/roxterm-config.1.xml", True)
ctx.subst_file("${TOP_DIR}/roxterm.spec.in", "${BUILD_DIR}/roxterm.spec")
ctx.subst_file("${TOP_DIR}/.ycm_extra_conf.py.in",
"${TOP_DIR}/.ycm_extra_conf.py", True)
ctx.setenv('APPINFO_STRING', "${VERSION} (%s)" % \
time.strftime("%Y-%m-%d", time.gmtime(time.time())))
if not os.path.exists(ctx.subst(APPINFO)):
ctx.subst_file(APPINFO + ".in", APPINFO)
ctx.save_if_different("version.h",
'/* Auto-generated by mscript.py */\n' \
'#ifndef VERSION_H\n' \
'#define VERSION_H\n' \
'#define VERSION "${VERSION}"\n' \
'#endif\n')
ctx.created_by_config['version.h'] = True
# Make symlinks expected by ROX
for f in "AUTHORS ChangeLog COPYING COPYING-LGPL NEWS README".split():
if f == "ChangeLog":
dest = "${TOP_DIR}/Help/Changes"
else:
dest = "${TOP_DIR}/Help/" + f
src = "../" + f
if subprocess.call(["ln", "-nfs", src, ctx.subst(dest)]):
raise MaitchChildError("Failed to link '%s' Help file" % f)
elif ctx.mode == 'build':
# Private library
for c in MINILIB_SOURCES.split():
ctx.add_rule(StaticLibCRule(
sources = c,
cflags = "${CORE_CFLAGS}",
prefix = "libroxterm-",
quiet = True))
ctx.add_rule(CStaticLibRule(
sources = change_suffix_with_prefix(MINILIB_SOURCES,
".c", ".lo", "libroxterm-"),
targets = "libroxterm.la",
cflags = "${CORE_CFLAGS}",
libs = "${CORE_LIBS}",
quiet = True))
# roxterm
if bool(ctx.env['ENABLE_SM']):
ROXTERM_SOURCES += " session.c"
for c in ROXTERM_SOURCES.split():
ctx.add_rule(LibtoolCRule(
sources = c,
cflags = "${ROXTERM_CFLAGS}",
prefix = "roxterm-",
wdeps = "version.h"))
ctx.add_rule(LibtoolCProgramRule(
sources = change_suffix_with_prefix(ROXTERM_SOURCES,
".c", ".lo", "roxterm-"),
targets = "roxterm",
cflags = "${ROXTERM_CFLAGS}",
libs = "${ROXTERM_LIBS} -lroxterm",
deps = "libroxterm.la",
quiet = True))
# roxterm-config
for c in ROXTERM_CONFIG_SOURCES.split():
ctx.add_rule(LibtoolCRule(
sources = c,
cflags = "${ROXTERM_CONFIG_CFLAGS}",
prefix = "roxterm-config-"))
ctx.add_rule(LibtoolCProgramRule(
sources = change_suffix_with_prefix(ROXTERM_CONFIG_SOURCES,
".c", ".lo", "roxterm-config-"),
targets = "roxterm-config",
cflags = "${ROXTERM_CONFIG_CFLAGS}",
libs = "${ROXTERM_CONFIG_LIBS} -lroxterm",
deps = "libroxterm.la",
quiet = True))
# Stuff other than the program
# Graphics
if ctx.env['CONVERT']:
ctx.add_rule(Rule(rule = "${CONVERT} -background #0000 " \
"${SRC} -geometry 64x64 ${TGT}",
targets = LOGO_PNG,
sources = "roxterm.svg",
where = TOP))
# Note 'where' is NOWHERE for following two rules because sources
# already start with ${TOP_DIR}.
ctx.add_rule(Rule(rule = "${CONVERT} ${SRC} -geometry 16x16 ${TGT}",
targets = FAVICON,
sources = LOGO_PNG,
where = NOWHERE))
ctx.add_rule(Rule( \
rule = "${COMPOSITE} -gravity SouthWest ${SRC} ${TGT}",
targets = TEXT_LOGO,
sources = [LOGO_PNG, "${TOP_DIR}/Help/lib/logo_text_only.png"],
where = NOWHERE))
# man pages
if ctx.env['XMLTOMAN']:
xmltomanrule = "${XMLTOMAN} ${XMLTOMAN_OPTS} ${SRC} ${XMLTOMAN_OUTPUT}"
# Something is causing a thread to hang between calling xsltproc
# and exiting from subprocess.Popen. Could it be trying to run two
# at once? Making one wdep on the other should stop jobs overlapping.
ctx.add_rule(Rule(
rule = xmltomanrule,
targets = "roxterm.1",
sources = "roxterm.1.xml",
where = TOP))
ctx.add_rule(Rule(
rule = xmltomanrule,
targets = "roxterm-config.1",
sources = "roxterm-config.1.xml",
wdeps = "roxterm.1",
where = TOP))
#ctx.add_rule(SuffixRule(
# rule = xmltomanrule,
# targets = ".1",
# sources = ".1.xml",
# where = TOP))
# Force invocation of above suffix rule
#ctx.add_rule(TouchRule(
# targets = "manpages",
# sources = "roxterm.1 roxterm-config.1"))
# Translations (gettext)
if ctx.env['HAVE_GETTEXT']:
podir = '${ABS_BUILD_DIR}/po'
ctx.add_rule(Rule(rule = mkdir_rule, targets = podir))
args = { 'copyright_holder': "(c) 2013 Tony Houghton",
'version': "${VERSION}",
'bugs_addr': "${BUG_TRACKER}",
'use_shell': True,
'dir': podir }
code_pot = '${ABS_BUILD_DIR}/po/code.pot'
glade_pot = '${ABS_BUILD_DIR}/po/glade.pot'
trans_rules = PoRulesFromLinguas(ctx, **args) + \
PotRules(ctx,
targets = code_pot,
deps = podir,
xgettext_opts = '-C -k_ -kN_',
**args)
for r in trans_rules:
ctx.add_rule(r)
ctx.add_rule(PotRule(ctx,
sources = '../src/roxterm-config.ui',
targets = glade_pot,
deps = podir,
xgettext_opts = '-L Glade',
dir = "${ABS_TOP_DIR}/po"))
ctx.add_rule(Rule(sources = [code_pot, glade_pot],
targets = '${ABS_TOP_DIR}/po/${PACKAGE}.pot',
rule = '${MSGCAT} -o ${TGT} ${SRC}',
diffpat = gettext_diffpat))
# Symlinks so ROX can use translations
if ctx.env["ENABLE_ROX_LOCALES"]:
def add_rox_locale(ctx, l, f):
d = opj("locale", l, "LC_MESSAGES")
ctx.add_rule(Rule(rule = mkdir_rule, targets = d))
ctx.add_rule(Rule(rule = "ln -nfs ../../po/%s.mo ${TGT}" % l,
targets = opj(d, "roxterm.mo"),
wdeps = [d, opj("po", "%s.mo" % l)]))
foreach_lingua(ctx, add_rox_locale)
ctx.add_rule(Rule(rule = "ln -nfs pt_BR ${TGT}",
targets = opj("locale", "pt"),
wdeps = opj("locale", "pt_BR", "LC_MESSAGES")))
# Translations (po4a)
if ctx.env['HAVE_PO4A']:
linguas = parse_linguas(ctx, podir = "${PO4ADIR}")
charset_rule = "${SED} -i s/charset=CHARSET/charset=UTF-8/ ${TGT}"
ctx.ensure_out_dir("po4a")
if ctx.env['XMLTOMAN']:
# Workaround for deadlock (?)
lastmtarget = None
for m in ["roxterm", "roxterm-config"]:
ctx.add_rule(Rule(rule = ["${PO4A_GETTEXTIZE} ${PO4AOPTS} " \
"-f docbook -m ${SRC} -p ${TGT}",
charset_rule],
sources = "../%s.1.xml.in" % m,
targets = "%s.1.pot" % m,
where = NOWHERE,
diffpat = gettext_diffpat,
dir = "${PO4ADIR}",
use_shell = True))
for l in linguas:
po = "${PO4ADIR}/%s.1.%s.po" % (m, l)
ctx.add_rule(Rule(rule = ["${PO4A_UPDATEPO} ${PO4AOPTS} " \
"-f docbook -m ${SRC} -p ${TGT}",
charset_rule,
"rm -f ${TGT}~"],
sources = ["../%s.1.xml.in" % m, "%s.1.pot" % m],
targets = po,
diffpat = gettext_diffpat,
where = NOWHERE,
dir = "${PO4ADIR}",
use_shell = True))
ctx.add_rule(Rule(rule = "${PO4A_TRANSLATE} "
"${PO4ACHARSET} " \
"-k 0 -f docbook -m ../%s.1.xml.in " \
"-p ${SRC} -l ${TGT}" % m,
sources = po,
targets = "${ABS_BUILD_DIR}/po4a/%s.1.%s.xml.in" % \
(m, l),
where = NOWHERE,
dir = "${PO4ADIR}",
use_shell = True))
ctx.add_rule(Rule(rule = "${SED} " \
"'s/@VERSION@/${VERSION}/; " \
"s#@htmldir@#${HTMLDIR}#' <${SRC} >${TGT}",
sources = "${PO4ABUILDDIR}/%s.1.%s.xml.in" % (m, l),
targets = "${PO4ABUILDDIR}/%s.1.%s.xml" % (m, l),
deps = po,
where = NOWHERE,
use_shell = True))
mtarget = "${PO4ABUILDDIR}/%s/%s.1" % (l, m)
ctx.add_rule(Rule( \
rule = [mk_parent_dir_rule, xmltomanrule],
sources = "${PO4ABUILDDIR}/%s.1.%s.xml" % (m, l),
targets = mtarget,
wdeps = lastmtarget,
where = NOWHERE))
lastmtarget = mtarget
for h in ROXTERM_HTML_BASENAMES:
master = "../Help/en/%s.html" % h
pot = "%s.html.pot" % h
ctx.add_rule(Rule(rule = ["${PO4A_GETTEXTIZE} ${PO4AOPTS} " \
"-f xhtml -m ${SRC} -p ${TGT}",
charset_rule,
"${SED} -i 's/SOME DESCRIPTIVE TITLE/" \
"Translations for roxterm docs/' ${TGT}",
"${SED} -i 's/Copyright (C) YEAR/" + \
"Copyright (C) 2010-2014/' " \
"${TGT}",
"${SED} -i 's/FIRST AUTHOR <EMAIL@ADDRESS>, YEAR/"
"Tony Houghton <[email protected]>, 2014/' ${TGT}"],
sources = master,
targets = "${PO4ADIR}/" + pot,
where = NOWHERE,
dir = "${PO4ADIR}",
use_shell = True))
for l in linguas:
ldir = "../Help/%s" % l
ctx.ensure_out_dir(ldir)
po = "${PO4ADIR}/%s.html.%s.po" % (h, l)
ctx.add_rule(Rule(rule = ["${PO4A_UPDATEPO} ${PO4AOPTS} " \
"-f xhtml -m ${SRC} -p ${TGT}",
charset_rule],
sources = [master, pot],
targets = po,
where = NOWHERE,
dir = "${PO4ADIR}",
use_shell = True))
ctx.add_rule(Rule(rule = [mk_parent_dir_rule,
"${PO4A_TRANSLATE} "
"${PO4ACHARSET} " \
"-k 0 -f xhtml -m %s " \
"-p ${SRC} -l ${TGT}" % master],
sources = po,
targets = "${ABS_TOP_DIR}/Help/%s/%s.html" % (ldir, h),
where = NOWHERE,
dir = "${PO4ADIR}",
use_shell = True))
# Translations (itstool)
if ctx.env['HAVE_ITSTOOL']:
podir = "${POXML_DIR}"
linguas = parse_linguas(ctx, podir = podir)
basename = "roxterm.appdata.xml"
xmlout = "${ABS_BUILD_DIR}/" + basename
xmlin = "../" + basename + ".in"
potfile = "${POXML_DIR}/roxterm.appdata.xml.pot"
ctx.add_rule(Rule( \
rule = ["${ITSTOOL} -i ${APPDATA_ITS} -o ${TGT} ${SRC}",
"${SED} -i 's/Project-Id-Version: PACKAGE VERSION/" \
"Project-Id-Version: roxterm ${VERSION}/' " \
"${TGT}"],
sources = xmlin,
targets = potfile,
deps = "${APPDATA_ITS}",
where = NOWHERE,
dir = podir,
use_shell = True))
if linguas:
for r in PoRulesFromLinguas(ctx, podir = podir,
modir = "${POXML_BUILD_DIR}",
sources = potfile):
ctx.add_rule(r)
sources = []
for l in parse_linguas(ctx, podir = podir):
sources.append(opj("${POXML_BUILD_DIR}", l + ".mo"))
ctx.add_rule(Rule( \
rule = "${ITSTOOL} -i ${APPDATA_ITS} -j " + xmlin +
" -o ${TGT} ${SRC}",
sources = sources,
targets = xmlout,
dir = podir,
where = NOWHERE))
else:
linguas = None
if not linguas:
ctx.add_rule(Rule(rule = "cp ${SRC} ${TGT}",
sources = "${ABS_TOP_DIR}/roxterm.appdata.xml.in",
targets = "${ABS_BUILD_DIR}/roxterm.appdata.xml",
where = NOWHERE))
elif ctx.mode == "install" or ctx.mode == "uninstall":
ctx.install_bin("roxterm roxterm-config")
ctx.install_data("roxterm-config.ui")
ctx.install_data("roxterm.desktop", "${DATADIR}/applications")
ctx.install_data("roxterm.appdata.xml", "${DATADIR}/appdata")
if ctx.env['XMLTOMAN']:
ctx.install_man("roxterm.1 roxterm-config.1")
ctx.install_doc("AUTHORS ChangeLog README")
ctx.install_doc(ctx.glob("*.html",
subdir = ctx.subst("${TOP_DIR}/Help/en")),
"${HTMLDIR}/en")
ctx.install_doc(ctx.glob("*.png",
subdir = ctx.subst("${TOP_DIR}/Help/lib")),
"${HTMLDIR}/lib")
ctx.install_doc(ctx.glob("*.css",
subdir = ctx.subst("${TOP_DIR}/Help/lib")),
"${HTMLDIR}/lib")
ctx.install_data("roxterm.svg", "${DATADIR}/icons/hicolor/scalable/apps")
ctx.install_data(["Config/Colours/Tango", "Config/Colours/GTK"],
"${PKGDATADIR}/Config/Colours")
ctx.install_data("Config/Shortcuts/Default",
"${PKGDATADIR}/Config/Shortcuts")
gda = ctx.env['WITH_GNOME_DEFAULT_APPLICATIONS']
if gda:
ctx.install_data("roxterm.xml", gda)
linguas = parse_linguas(ctx)
if ctx.env['HAVE_GETTEXT']:
for l in linguas:
ctx.install_data("po/%s.mo" % l,
"${LOCALEDIR}/%s/LC_MESSAGES/roxterm.mo" % l,
other_options = "-T")
ptdir = ctx.subst("${DESTDIR}/${LOCALEDIR}/pt/LC_MESSAGES")
ctx.ensure_out_dir(ptdir)
call_subprocess(["ln", "-sfn",
"../../pt_BR/LC_MESSAGES/roxterm.mo", ptdir])
if ctx.env['HAVE_PO4A']:
for l in linguas:
if ctx.env['XMLTOMAN']:
ctx.install_man("po4a/%s/roxterm.1 po4a/%s/roxterm-config.1" % \
(l, l), opj("${MANDIR}", l))
ctx.install_doc( \
ctx.glob("*.html",
subdir = ctx.subst("${TOP_DIR}/Help/%s" % l)),
"${HTMLDIR}/%s" % l)
ptdir = ctx.subst("${DESTDIR}/${MANDIR}/pt/man1")
ctx.ensure_out_dir(ptdir)
call_subprocess(["ln", "-sfn", "../../pt_BR/man1/roxterm.1", ptdir])
call_subprocess(["ln", "-sfn", "../../pt_BR/man1/roxterm-config.1",
ptdir])
call_subprocess(["ln", "-sfn", "pt_BR",
ctx.subst("${DESTDIR}/${HTMLDIR}/pt")])
elif ctx.mode == 'pristine' or ctx.mode == 'clean':
clean = ["${TOP_DIR}/maitch.pyc"] + \
["${TOP_DIR}/.ycm_extra_conf.py",
"${TOP_DIR}/.ycm_extra_conf.pyc"] + \
ctx.glob("*.po~", "${TOP_DIR}", "po") + \
ctx.glob("*.po~", "${TOP_DIR}", "po4a") + \
ctx.glob("*.po~", "${TOP_DIR}", "poxml")
if ctx.mode == 'pristine':
clean += [APPINFO, VFILE, "${TOP_DIR}/ChangeLog"] + \
["${TOP_DIR}/Help/" + f for f in \
"AUTHORS COPYING COPYING-LGPL Changes NEWS README".split()] + \
["${TOP_DIR}/Help/lib/" + f for f in \
"favicon.ico logo_text.png roxterm_logo.png".split()] + \
ctx.glob("*.pot", "${TOP_DIR}", "po") + \
ctx.glob("*.pot", "${TOP_DIR}", "po4a") + \
ctx.glob("*.pot", "${TOP_DIR}", "poxml")
for f in clean:
ctx.delete(f)
# Generated HTML doesn't go in tarball so must be cleaned
f = open(ctx.subst("${TOP_DIR}/po4a/LINGUAS"), 'r')
hd = ctx.subst("${TOP_DIR}/Help/")
for d in [hd + l.strip() for l in f.readlines() + ['pt']]:
recursively_remove(d, False, [])
f.close()
elif ctx.mode == 'dist':
ctx.subst_file(APPINFO + ".in", APPINFO)
ctx.add_dist("AUTHORS Help/AUTHORS " \
"genlog ChangeLog ChangeLog.old Config " \
"COPYING COPYING-LGPL Help/en Help/lib/header.png " \
"Help/lib/logo_text_only.png " \
"Help/lib/roxterm.css Help/lib/roxterm_ie.css "
"Help/lib/sprites.png " \
"INSTALL INSTALL.Debian " \
"NEWS README README.translations " \
"roxterm.1.xml.in roxterm-config.1.xml.in " \
"roxterm.appdata.xml.in " \
"roxterm.desktop roxterm.lsm.in roxterm.spec.in " \
"roxterm.svg roxterm.xml TODO " \
"src/roxterm-config.glade src/roxterm-config.ui " \
".ycm_extra_conf.py.in")
ctx.add_dist([f.replace("${TOP_DIR}/", "") \
for f in [LOGO_PNG, FAVICON, TEXT_LOGO]])
ctx.add_dist(ctx.glob("*.[c|h]", os.curdir, "src"))
# maitch-specific
ctx.add_dist("version maitch.py mscript.py")
# ROX bits
ctx.add_dist("AppInfo.xml.in AppRun .DirIcon " \
"Help/Changes Help/COPYING Help/COPYING-LGPL Help/NEWS Help/README")
if os.path.exists("AppInfo.xml"):
ctx.add_dist("AppInfo.xml")
# Translations
for f in ("po/LINGUAS", "po4a/LINGUAS", "poxml/LINGUAS",
"po/POTFILES.in", "po/roxterm.pot",
"poxml/appdata.its", "poxml/roxterm.appdata.xml.pot"):
if os.path.exists(f):
ctx.add_dist(f)
files = ctx.glob("*.po", os.curdir, "po") + \
ctx.glob("*.po", os.curdir, "po4a") + \
ctx.glob("*.pot", os.curdir, "po4a") + \
ctx.glob("*.po", os.curdir, "poxml")
if files:
ctx.add_dist(files)
ctx.run()
if ctx.mode == 'uninstall':
ctx.prune_directory("${DATADIR}/icons")
ctx.prune_directory("${PKGDATADIR}")
ctx.prune_directory("${DOCDIR}")
ctx.prune_directory("${HTMLDIR}")
elif ctx.mode == 'uninstall':
basedir = self.subst("${PACKAGE}-${VERSION}")
filename = os.path.abspath(
self.subst("${BUILD_DIR}/%s.%s" % (basedir, suffix)))
mprint("Creating %s '%s'" % (zname, filename))
| gpl-2.0 | 2,718,585,511,010,426,400 | 6,938,686,150,239,518,000 | 40.227972 | 80 | 0.490875 | false |
kondrak/bgfx | 3rdparty/scintilla/scripts/FileGenerator.py | 74 | 6509 | #!/usr/bin/env python
# FileGenerator.py - implemented 2013 by Neil Hodgson [email protected]
# Released to the public domain.
# Generate or regenerate source files based on comments in those files.
# May be modified in-place or a template may be generated into a complete file.
# Requires Python 2.5 or later
# The files are copied to a string apart from sections between a
# ++Autogenerated comment and a --Autogenerated comment, which are
# generated by the CopyWithInsertion function. After the whole string is
# instantiated, it is compared with the target file and if different the file
# is rewritten.
from __future__ import with_statement
import codecs, os, re, string, sys
lineEnd = "\r\n" if sys.platform == "win32" else "\n"
def UpdateFile(filename, updated):
""" If the file contents are different to updated then copy updated into the
file else leave alone so Mercurial and make don't treat it as modified. """
newOrChanged = "Changed"
try:
with codecs.open(filename, "r", "utf-8") as infile:
original = infile.read()
if updated == original:
# Same as before so don't write
return
os.unlink(filename)
except IOError: # File is not there yet
newOrChanged = "New"
with codecs.open(filename, "w", "utf-8") as outfile:
outfile.write(updated)
print("%s %s" % (newOrChanged, filename))
# Automatically generated sections contain start and end comments,
# a definition line and the results.
# The results are replaced by regenerating based on the definition line.
# The definition line is a comment prefix followed by "**".
# If there is a digit after the ** then this indicates which list to use
# and the digit and next character are not part of the definition
# Backslash is used as an escape within the definition line.
# The part between \( and \) is repeated for each item in the list.
# \* is replaced by each list item. \t, and \n are tab and newline.
# If there is no definition line then the first list is copied verbatim.
# If retainDefs then the comments controlling generation are copied.
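# A short illustrative example (the names are invented): with a commentPrefix
# of "//" and lists[0] == ["Alpha", "Beta"], a section written as
#   //++Autogenerated
#   //**\(case \*:\n\)
#   //--Autogenerated
# is regenerated so that the text between the two marker comments becomes
#   case Alpha:
#   case Beta: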
def CopyWithInsertion(input, commentPrefix, retainDefs, lists):
copying = 1
generated = False
listid = 0
output = []
for line in input.splitlines(0):
isStartGenerated = line.lstrip().startswith(commentPrefix + "++Autogenerated")
if copying and not isStartGenerated:
output.append(line)
if isStartGenerated:
if retainDefs:
output.append(line)
copying = 0
generated = False
elif not copying and not generated:
# Generating
if line.startswith(commentPrefix + "**"):
# Pattern to transform input data
if retainDefs:
output.append(line)
definition = line[len(commentPrefix + "**"):]
if (commentPrefix == "<!--") and (" -->" in definition):
definition = definition.replace(" -->", "")
listid = 0
if definition[0] in string.digits:
listid = int(definition[:1])
definition = definition[2:]
# Hide double slashes as a control character
definition = definition.replace("\\\\", "\001")
# Do some normal C style transforms
definition = definition.replace("\\n", "\n")
definition = definition.replace("\\t", "\t")
# Get the doubled backslashes back as single backslashes
definition = definition.replace("\001", "\\")
startRepeat = definition.find("\\(")
endRepeat = definition.find("\\)")
intro = definition[:startRepeat]
out = ""
if intro.endswith("\n"):
pos = 0
else:
pos = len(intro)
out += intro
middle = definition[startRepeat+2:endRepeat]
for i in lists[listid]:
item = middle.replace("\\*", i)
if pos and (pos + len(item) >= 80):
out += "\\\n"
pos = 0
out += item
pos += len(item)
if item.endswith("\n"):
pos = 0
outro = definition[endRepeat+2:]
out += outro
out = out.replace("\n", lineEnd) # correct EOLs in generated content
output.append(out)
else:
# Simple form with no rule to transform input
output.extend(lists[0])
generated = True
if line.lstrip().startswith(commentPrefix + "--Autogenerated") or \
line.lstrip().startswith(commentPrefix + "~~Autogenerated"):
copying = 1
if retainDefs:
output.append(line)
output = [line.rstrip(" \t") for line in output] # trim trailing whitespace
return lineEnd.join(output) + lineEnd
def GenerateFile(inpath, outpath, commentPrefix, retainDefs, *lists):
"""Generate 'outpath' from 'inpath'.
"""
try:
with codecs.open(inpath, "r", "UTF-8") as infile:
original = infile.read()
updated = CopyWithInsertion(original, commentPrefix,
retainDefs, lists)
UpdateFile(outpath, updated)
except IOError:
print("Can not open %s" % inpath)
def Generate(inpath, outpath, commentPrefix, *lists):
"""Generate 'outpath' from 'inpath'.
"""
GenerateFile(inpath, outpath, commentPrefix, inpath == outpath, *lists)
def Regenerate(filename, commentPrefix, *lists):
"""Regenerate the given file.
"""
Generate(filename, filename, commentPrefix, *lists)
def UpdateLineInFile(path, linePrefix, lineReplace):
lines = []
updated = False
with codecs.open(path, "r", "utf-8") as f:
for l in f.readlines():
l = l.rstrip()
if not updated and l.startswith(linePrefix):
lines.append(lineReplace)
updated = True
else:
lines.append(l)
contents = lineEnd.join(lines) + lineEnd
UpdateFile(path, contents)
def ReplaceREInFile(path, match, replace):
with codecs.open(path, "r", "utf-8") as f:
contents = f.read()
contents = re.sub(match, replace, contents)
UpdateFile(path, contents)
| bsd-2-clause | -134,859,416,599,723,060 | 4,645,112,440,792,513,000 | 40.196203 | 86 | 0.587033 | false |
egafford/sahara | sahara/tests/unit/service/validation/test_ng_template_validation_create.py | 3 | 15036 | # Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from sahara.service.api import v10 as api
from sahara.service.validations import node_group_template_schema as ngt_schema
from sahara.service.validations import node_group_templates as nt
from sahara.tests.unit.service.validation import utils as u
class TestNGTemplateCreateValidation(u.ValidationTestCase):
def setUp(self):
super(TestNGTemplateCreateValidation, self).setUp()
self._create_object_fun = nt.check_node_group_template_create
self.scheme = ngt_schema.NODE_GROUP_TEMPLATE_SCHEMA
api.plugin_base.setup_plugins()
def test_node_groups_create_required(self):
self._assert_create_object_validation(
data={
},
bad_req_i=(1, "VALIDATION_ERROR",
u"'name' is a required property")
)
self._assert_create_object_validation(
data={
'name': 'a'
},
bad_req_i=(1, "VALIDATION_ERROR",
u"'flavor_id' is a required property")
)
self._assert_create_object_validation(
data={
'name': 'a',
'flavor_id': '42',
'plugin_name': 'fake',
},
bad_req_i=(1, "VALIDATION_ERROR",
u"'hadoop_version' is a required property")
)
self._assert_create_object_validation(
data={
'name': 'a',
'flavor_id': '42',
'plugin_name': 'fake',
'hadoop_version': '0.1'
},
bad_req_i=(1, "VALIDATION_ERROR",
u"'node_processes' is a required property")
)
self._assert_create_object_validation(
data={
'name': "a",
'flavor_id': '42',
'plugin_name': 'fake',
'hadoop_version': '0.1',
'node_processes': []
},
bad_req_i=(1, 'VALIDATION_ERROR',
u'node_processes: \[\] is too short')
)
def test_ng_template_create_v_names(self):
data = {
'name': 'a',
'flavor_id': '42',
'plugin_name': 'fake',
'hadoop_version': '0.1',
'node_processes': ['namenode']
}
self._assert_valid_name_hostname_validation(data)
def test_ng_template_create_v_node_processes(self):
self._assert_create_object_validation(
data={
'name': "a",
'flavor_id': '42',
'plugin_name': 'fake',
'hadoop_version': '0.1',
'node_processes': ["namenode", "namenode"]
},
bad_req_i=(1, 'INVALID_DATA',
'Duplicates in node processes have been detected')
)
self._assert_create_object_validation(
data={
'name': 'a',
'flavor_id': '42',
'plugin_name': 'fake',
'hadoop_version': '0.1',
'node_processes': ['wrong_process']
},
bad_req_i=(1, 'INVALID_REFERENCE',
"Plugin doesn't support the following node processes: "
"['wrong_process']")
)
@mock.patch(
"sahara.service.validations.base.check_volume_availability_zone_exist")
@mock.patch("sahara.service.validations.base.check_volume_type_exists")
@mock.patch(
"sahara.service.validations.base.check_availability_zone_exist")
@mock.patch("sahara.service.validations.base.check_security_groups_exist")
def test_ng_template_create_v_right(self,
s_groups, a_zone, v_type, v_a_zone):
self._assert_create_object_validation(
data={
'name': 'a',
'flavor_id': '42',
'plugin_name': 'fake',
'hadoop_version': '0.1',
'node_processes': ['namenode',
'datanode',
'tasktracker',
'jobtracker'],
'image_id': '550e8400-e29b-41d4-a716-446655440000',
'node_configs': {},
'volumes_per_node': 2,
'volumes_size': 10,
'volume_type': 'fish',
'volumes_availability_zone': 'ocean',
'volume_mount_prefix': '/tmp',
'description': "my node group",
'floating_ip_pool': 'd9a3bebc-f788-4b81-9a93-aa048022c1ca',
'security_groups': ['cat', 'dog'],
'auto_security_group': False,
'availability_zone': 'here',
'is_proxy_gateway': False,
'volume_local_to_instance': False,
'is_public': False,
'is_protected': False
}
)
def test_ng_template_create_v_nulls(self):
self._assert_create_object_validation(
data={
'name': 'a',
'flavor_id': '42',
'plugin_name': 'fake',
'hadoop_version': '0.1',
'node_processes': ['namenode',
'datanode',
'tasktracker',
'jobtracker'],
'image_id': None,
'node_configs': None,
'volumes_size': None,
'volume_type': None,
'volumes_availability_zone': None,
'volume_mount_prefix': None,
'description': None,
'floating_ip_pool': None,
'security_groups': None,
'auto_security_group': None,
'availability_zone': None,
'is_proxy_gateway': None,
'volume_local_to_instance': None,
'is_public': None,
'is_protected': None
}
)
def test_ng_template_create_v_minimum_ints(self):
self._assert_create_object_validation(
data={
'name': 'a',
'flavor_id': '42',
'plugin_name': 'fake',
'hadoop_version': '0.1',
'node_processes': ['wrong_process'],
'volumes_per_node': -1
},
bad_req_i=(1, 'VALIDATION_ERROR',
u'volumes_per_node: -1(.0)? is less than the minimum '
u'of 0')
)
self._assert_create_object_validation(
data={
'name': 'a',
'flavor_id': '42',
'plugin_name': 'fake',
'hadoop_version': '0.1',
'node_processes': ['wrong_process'],
'volumes_size': 0
},
bad_req_i=(1, 'VALIDATION_ERROR',
u'volumes_size: 0(.0)? is less than the minimum of 1')
)
def test_ng_template_create_v_types(self):
default_data = {
'name': 'a', 'flavor_id': '42',
'plugin_name': 'fake',
'hadoop_version': '0.1',
'node_processes': ['namenode']
}
self._assert_types(default_data)
def test_ng_template_create_v_unique_ng(self):
data = {
'name': 'test',
'flavor_id': '42',
'plugin_name': 'fake',
'hadoop_version': '0.1',
'node_processes': ['namenode']}
self._assert_create_object_validation(
data=data,
bad_req_i=(1, 'NAME_ALREADY_EXISTS',
"NodeGroup template with name 'test' already exists")
)
def test_ng_template_create_v_flavor_exists(self):
self._assert_create_object_validation(
data={
'name': 'test-ng',
'flavor_id': '1',
'plugin_name': 'fake',
'hadoop_version': '0.1',
'node_processes': ['namenode']
},
bad_req_i=(1, 'NOT_FOUND',
"Requested flavor '1' not found")
)
def test_ng_template_create_validate_image(self):
self._assert_create_object_validation(
data={
'name': 'test-ng',
'flavor_id': '42',
'plugin_name': 'fake',
'hadoop_version': '0.1',
'node_processes': ['namenode'],
'image_id': '12345'
},
bad_req_i=(1, 'VALIDATION_ERROR',
"image_id: '12345' is not a 'uuid'")
)
self._assert_create_object_validation(
data={
'name': 'test-ng',
'flavor_id': '42',
'plugin_name': 'fake',
'hadoop_version': '0.1',
'node_processes': ['namenode'],
'image_id': '12345678-1234-1234-1234-123456789000'
},
bad_req_i=(1, 'INVALID_REFERENCE',
"Requested image "
"'12345678-1234-1234-1234-123456789000' "
"is not registered")
)
self._assert_create_object_validation(
data={
'name': 'test-ng',
'flavor_id': '42',
'plugin_name': 'fake',
'hadoop_version': '0.1',
'node_processes': ['namenode'],
'image_id': '813fe450-40d2-4acc-ade5-ea753a1bd5bc'
},
bad_req_i=(1, 'INVALID_REFERENCE',
"Requested image "
"'813fe450-40d2-4acc-ade5-ea753a1bd5bc' "
"doesn't contain required tags: "
"['0.1']")
)
self._assert_create_object_validation(
data={
'name': 'test-ng',
'flavor_id': '42',
'plugin_name': 'fake',
'hadoop_version': '0.1',
'node_processes': ['namenode'],
'image_id': '550e8400-e29b-41d4-a716-446655440000'
}
)
def test_ng_template_create_v_ng_configs(self):
self._assert_create_object_validation(
data={
'name': 'test-ng',
'flavor_id': '42',
'plugin_name': 'fake',
'hadoop_version': '0.1',
'node_processes': ['namenode'],
'node_configs': {
'wrong_target': {
u'mapreduce.task.tmp.dir': '/temp/'
}
}},
bad_req_i=(1, 'INVALID_REFERENCE',
"Plugin doesn't contain applicable "
"target 'wrong_target'")
)
self._assert_create_object_validation(
data={
'name': 'test-ng',
'flavor_id': '42',
'plugin_name': 'fake',
'hadoop_version': '0.1',
'node_processes': ['namenode'],
'node_configs': {
'general': {
's': 'a'
}
}
},
bad_req_i=(1, 'INVALID_REFERENCE',
"Plugin's applicable target 'general' doesn't "
"contain config with name 's'")
)
def test_ng_template_cinder(self):
self._assert_create_object_validation(
data={
'name': 'a',
'flavor_id': '42',
'plugin_name': 'fake',
'hadoop_version': '0.1',
'node_processes': ['wrong_process'],
'volumes_per_node': -1
},
bad_req_i=(1, 'VALIDATION_ERROR',
u'volumes_per_node: -1(.0)? is less than the minimum '
u'of 0')
)
self._assert_create_object_validation(
data={
'name': 'a',
'flavor_id': '42',
'plugin_name': 'fake',
'hadoop_version': '0.1',
'node_processes': ['wrong_process'],
'volumes_size': 0
},
bad_req_i=(1, 'VALIDATION_ERROR',
u'volumes_size: 0(.0)? is less than the minimum of 1')
)
self._assert_create_object_validation(
data={
'name': 'a',
'flavor_id': '42',
'plugin_name': 'fake',
'hadoop_version': '0.1',
'node_processes': ['datanode', 'tasktracker'],
'volumes_per_node': 1,
'volumes_size': 1,
'volume_mount_prefix': '/mnt/volume'
}
)
data = {
'name': 'a',
'flavor_id': '42',
'plugin_name': 'fake',
'hadoop_version': '0.1',
'node_processes': ['datanode', 'tasktracker'],
'volumes_per_node': 1,
'volumes_size': 1,
'volume_mount_prefix': 'qwerty'
}
self._assert_create_object_validation(
data=data,
bad_req_i=(1, 'VALIDATION_ERROR', "volume_mount_prefix: 'qwerty' "
"is not a 'posix_path'")
)
def test_wrong_floating_ip_pool(self):
self._assert_create_object_validation(
data={
'name': 'a',
'flavor_id': '42',
'plugin_name': 'fake',
'hadoop_version': '0.1',
'node_processes': ['datanode', 'tasktracker'],
'floating_ip_pool': 'network_bad'
},
bad_req_i=(1, 'NOT_FOUND', "Floating IP pool network_bad "
"not found")
)
def test_volumes_per_node_without_volumes_size(self):
self._assert_create_object_validation(
data={
'name': 'a',
'flavor_id': '42',
'plugin_name': 'fake',
'hadoop_version': '0.1',
'node_processes': ['datanode', 'tasktracker'],
'volumes_per_node': 1
},
bad_req_i=(1, 'INVALID_REFERENCE', "You must specify a "
"volumes_size parameter")
)
| apache-2.0 | 2,347,637,810,250,458,000 | 8,773,329,172,933,942,000 | 35.852941 | 79 | 0.444799 | false |
tcheehow/MissionPlanner | Lib/site-packages/scipy/ndimage/interpolation.py | 55 | 25609 | # Copyright (C) 2003-2005 Peter J. Verveer
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import numpy
import _ni_support
import _nd_image
def _extend_mode_to_code(mode):
mode = _ni_support._extend_mode_to_code(mode)
return mode
def spline_filter1d(input, order=3, axis=-1, output=numpy.float64):
"""
Calculates a one-dimensional spline filter along the given axis.
The lines of the array along the given axis are filtered by a
spline filter. The order of the spline must be >= 2 and <= 5.
Parameters
----------
input : array_like
The input array.
order : int, optional
The order of the spline, default is 3.
axis : int, optional
The axis along which the spline filter is applied. Default is the last
axis.
output : ndarray or dtype, optional
The array in which to place the output, or the dtype of the returned
array. Default is `numpy.float64`.
Returns
-------
return_value : ndarray or None
The filtered input. If `output` is given as a parameter, None is
returned.
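    Examples
    --------
    A tiny illustrative call; for orders 0 and 1 the filter simply returns a
    copy of the input:
    >>> sp.ndimage.spline_filter1d(np.arange(4.), order=1)
    array([ 0., 1., 2., 3.])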
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output, return_value = _ni_support._get_output(output, input)
if order in [0, 1]:
output[...] = numpy.array(input)
else:
axis = _ni_support._check_axis(axis, input.ndim)
_nd_image.spline_filter1d(input, order, axis, output)
return return_value
def spline_filter(input, order=3, output = numpy.float64):
"""
Multi-dimensional spline filter.
For more details, see `spline_filter1d`.
See Also
--------
spline_filter1d
Notes
-----
The multi-dimensional filter is implemented as a sequence of
one-dimensional spline filters. The intermediate arrays are stored
in the same data type as the output. Therefore, for output types
with a limited precision, the results may be imprecise because
intermediate results may be stored with insufficient precision.
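    Examples
    --------
    A minimal usage sketch: filter an array once, then hand it to the
    transform functions with ``prefilter=False`` so it is not filtered again:
    >>> a = np.arange(12.).reshape((4, 3))
    >>> filtered = sp.ndimage.spline_filter(a, order=3)
    >>> shifted = sp.ndimage.shift(filtered, (1, 1), order=3, prefilter=False)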
"""
if order < 2 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output, return_value = _ni_support._get_output(output, input)
if order not in [0, 1] and input.ndim > 0:
for axis in range(input.ndim):
spline_filter1d(input, order, axis, output = output)
input = output
else:
output[...] = input[...]
return return_value
def geometric_transform(input, mapping, output_shape=None,
output=None, order=3,
mode='constant', cval=0.0, prefilter=True,
extra_arguments=(), extra_keywords={}):
"""
    Apply an arbitrary geometric transform.
The given mapping function is used to find, for each point in the
output, the corresponding coordinates in the input. The value of the
input at those coordinates is determined by spline interpolation of
the requested order.
Parameters
----------
input : array_like
The input array.
mapping : callable
A callable object that accepts a tuple of length equal to the output
array rank, and returns the corresponding input coordinates as a tuple
of length equal to the input array rank.
output_shape : tuple of ints
Shape tuple.
output : ndarray or dtype, optional
The array in which to place the output, or the dtype of the returned
array.
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
mode : str, optional
Points outside the boundaries of the input are filled according
to the given mode ('constant', 'nearest', 'reflect' or 'wrap').
Default is 'constant'.
cval : scalar, optional
Value used for points outside the boundaries of the input if
``mode='constant'``. Default is 0.0
prefilter : bool, optional
The parameter prefilter determines if the input is pre-filtered with
`spline_filter` before interpolation (necessary for spline
interpolation of order > 1). If False, it is assumed that the input is
already filtered. Default is True.
extra_arguments : tuple, optional
Extra arguments passed to `mapping`.
extra_keywords : dict, optional
Extra keywords passed to `mapping`.
Returns
-------
return_value : ndarray or None
The filtered input. If `output` is given as a parameter, None is
returned.
See Also
--------
map_coordinates, affine_transform, spline_filter1d
Examples
--------
>>> a = np.arange(12.).reshape((4, 3))
>>> def shift_func(output_coords):
... return (output_coords[0] - 0.5, output_coords[1] - 0.5)
...
>>> sp.ndimage.geometric_transform(a, shift_func)
array([[ 0. , 0. , 0. ],
[ 0. , 1.362, 2.738],
[ 0. , 4.812, 6.187],
[ 0. , 8.263, 9.637]])
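    The mapping can also receive extra positional or keyword arguments; the
    sketch below passes the shift value through `extra_arguments` and gives
    the same result as above:
    >>> def shift_func(output_coords, s):
    ...     return (output_coords[0] - s, output_coords[1] - s)
    ...
    >>> sp.ndimage.geometric_transform(a, shift_func, extra_arguments=(0.5,))
    array([[ 0. , 0. , 0. ],
           [ 0. , 1.362, 2.738],
           [ 0. , 4.812, 6.187],
           [ 0. , 8.263, 9.637]])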
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if output_shape is None:
output_shape = input.shape
if input.ndim < 1 or len(output_shape) < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input,
shape=output_shape)
_nd_image.geometric_transform(filtered, mapping, None, None, None,
output, order, mode, cval, extra_arguments, extra_keywords)
return return_value
def map_coordinates(input, coordinates, output=None, order=3,
mode='constant', cval=0.0, prefilter=True):
"""
Map the input array to new coordinates by interpolation.
The array of coordinates is used to find, for each point in the output,
the corresponding coordinates in the input. The value of the input at
those coordinates is determined by spline interpolation of the
requested order.
The shape of the output is derived from that of the coordinate
array by dropping the first axis. The values of the array along
the first axis are the coordinates in the input array at which the
output value is found.
Parameters
----------
input : ndarray
The input array.
coordinates : array_like
The coordinates at which `input` is evaluated.
output : ndarray or dtype, optional
The array in which to place the output, or the dtype of the returned
array.
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
mode : str, optional
Points outside the boundaries of the input are filled according
to the given mode ('constant', 'nearest', 'reflect' or 'wrap').
Default is 'constant'.
cval : scalar, optional
Value used for points outside the boundaries of the input if
``mode='constant'``. Default is 0.0
prefilter : bool, optional
The parameter prefilter determines if the input is pre-filtered with
`spline_filter` before interpolation (necessary for spline
interpolation of order > 1). If False, it is assumed that the input is
already filtered. Default is True.
Returns
-------
return_value : ndarray
The result of transforming the input. The shape of the output is
derived from that of `coordinates` by dropping the first axis.
See Also
--------
spline_filter, geometric_transform, scipy.interpolate
Examples
--------
>>> import scipy.ndimage
>>> a = np.arange(12.).reshape((4, 3))
>>> a
array([[ 0., 1., 2.],
[ 3., 4., 5.],
[ 6., 7., 8.],
[ 9., 10., 11.]])
>>> sp.ndimage.map_coordinates(a, [[0.5, 2], [0.5, 1]], order=1)
[ 2. 7.]
Above, the interpolated value of a[0.5, 0.5] gives output[0], while
a[2, 1] is output[1].
>>> inds = np.array([[0.5, 2], [0.5, 4]])
>>> sp.ndimage.map_coordinates(a, inds, order=1, cval=-33.3)
array([ 2. , -33.3])
>>> sp.ndimage.map_coordinates(a, inds, order=1, mode='nearest')
array([ 2., 8.])
>>> sp.ndimage.map_coordinates(a, inds, order=1, cval=0, output=bool)
    array([ True, False], dtype=bool)
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
coordinates = numpy.asarray(coordinates)
if numpy.iscomplexobj(coordinates):
raise TypeError('Complex type not supported')
output_shape = coordinates.shape[1:]
if input.ndim < 1 or len(output_shape) < 1:
raise RuntimeError('input and output rank must be > 0')
if coordinates.shape[0] != input.ndim:
raise RuntimeError('invalid shape for coordinate array')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input,
shape=output_shape)
_nd_image.geometric_transform(filtered, None, coordinates, None, None,
output, order, mode, cval, None, None)
return return_value
def affine_transform(input, matrix, offset=0.0, output_shape=None,
output=None, order=3,
mode='constant', cval=0.0, prefilter=True):
"""
Apply an affine transformation.
The given matrix and offset are used to find for each point in the
output the corresponding coordinates in the input by an affine
transformation. The value of the input at those coordinates is
determined by spline interpolation of the requested order. Points
outside the boundaries of the input are filled according to the given
mode.
Parameters
----------
input : ndarray
The input array.
matrix : ndarray
The matrix must be two-dimensional or can also be given as a
one-dimensional sequence or array. In the latter case, it is assumed
        that the matrix is diagonal. A more efficient algorithm is then
applied that exploits the separability of the problem.
offset : float or sequence, optional
The offset into the array where the transform is applied. If a float,
`offset` is the same for each axis. If a sequence, `offset` should
contain one value for each axis.
output_shape : tuple of ints, optional
Shape tuple.
output : ndarray or dtype, optional
The array in which to place the output, or the dtype of the returned
array.
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
mode : str, optional
Points outside the boundaries of the input are filled according
to the given mode ('constant', 'nearest', 'reflect' or 'wrap').
Default is 'constant'.
cval : scalar, optional
Value used for points outside the boundaries of the input if
``mode='constant'``. Default is 0.0
prefilter : bool, optional
The parameter prefilter determines if the input is pre-filtered with
`spline_filter` before interpolation (necessary for spline
interpolation of order > 1). If False, it is assumed that the input is
already filtered. Default is True.
Returns
-------
return_value : ndarray or None
The transformed input. If `output` is given as a parameter, None is
returned.
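    Examples
    --------
    An illustrative example with a diagonal matrix and `order=0` (nearest
    neighbour), which simply shifts the rows up by one:
    >>> a = np.arange(12.).reshape((4, 3))
    >>> sp.ndimage.affine_transform(a, [1, 1], offset=[1, 0], order=0)
    array([[ 3., 4., 5.],
           [ 6., 7., 8.],
           [ 9., 10., 11.],
           [ 0., 0., 0.]])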
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if output_shape is None:
output_shape = input.shape
if input.ndim < 1 or len(output_shape) < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input,
shape=output_shape)
matrix = numpy.asarray(matrix, dtype = numpy.float64)
if matrix.ndim not in [1, 2] or matrix.shape[0] < 1:
raise RuntimeError('no proper affine matrix provided')
if matrix.shape[0] != input.ndim:
raise RuntimeError('affine matrix has wrong number of rows')
if matrix.ndim == 2 and matrix.shape[1] != output.ndim:
raise RuntimeError('affine matrix has wrong number of columns')
if not matrix.flags.contiguous:
matrix = matrix.copy()
offset = _ni_support._normalize_sequence(offset, input.ndim)
offset = numpy.asarray(offset, dtype = numpy.float64)
if offset.ndim != 1 or offset.shape[0] < 1:
raise RuntimeError('no proper offset provided')
if not offset.flags.contiguous:
offset = offset.copy()
if matrix.ndim == 1:
_nd_image.zoom_shift(filtered, matrix, offset, output, order,
mode, cval)
else:
_nd_image.geometric_transform(filtered, None, None, matrix, offset,
output, order, mode, cval, None, None)
return return_value
def shift(input, shift, output=None, order=3, mode='constant', cval=0.0,
prefilter=True):
"""
Shift an array.
The array is shifted using spline interpolation of the requested order.
Points outside the boundaries of the input are filled according to the
given mode.
Parameters
----------
input : ndarray
The input array.
    shift : float or sequence
The shift along the axes. If a float, `shift` is the same for each
axis. If a sequence, `shift` should contain one value for each axis.
output : ndarray or dtype, optional
The array in which to place the output, or the dtype of the returned
array.
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
mode : str, optional
Points outside the boundaries of the input are filled according
to the given mode ('constant', 'nearest', 'reflect' or 'wrap').
Default is 'constant'.
cval : scalar, optional
Value used for points outside the boundaries of the input if
``mode='constant'``. Default is 0.0
prefilter : bool, optional
The parameter prefilter determines if the input is pre-filtered with
`spline_filter` before interpolation (necessary for spline
interpolation of order > 1). If False, it is assumed that the input is
already filtered. Default is True.
Returns
-------
return_value : ndarray or None
The shifted input. If `output` is given as a parameter, None is
returned.
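    Examples
    --------
    A small example with `order=0` so the values are easy to verify; points
    shifted in from the boundary take the value of `cval`:
    >>> a = np.arange(5.)
    >>> sp.ndimage.shift(a, 1, order=0)
    array([ 0., 0., 1., 2., 3.])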
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if input.ndim < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input)
shift = _ni_support._normalize_sequence(shift, input.ndim)
shift = [-ii for ii in shift]
shift = numpy.asarray(shift, dtype = numpy.float64)
if not shift.flags.contiguous:
shift = shift.copy()
_nd_image.zoom_shift(filtered, None, shift, output, order, mode, cval)
return return_value
def zoom(input, zoom, output=None, order=3, mode='constant', cval=0.0,
prefilter=True):
"""
Zoom an array.
The array is zoomed using spline interpolation of the requested order.
Parameters
----------
input : ndarray
The input array.
    zoom : float or sequence
The zoom factor along the axes. If a float, `zoom` is the same for each
axis. If a sequence, `zoom` should contain one value for each axis.
output : ndarray or dtype, optional
The array in which to place the output, or the dtype of the returned
array.
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
mode : str, optional
Points outside the boundaries of the input are filled according
to the given mode ('constant', 'nearest', 'reflect' or 'wrap').
Default is 'constant'.
cval : scalar, optional
Value used for points outside the boundaries of the input if
``mode='constant'``. Default is 0.0
prefilter : bool, optional
The parameter prefilter determines if the input is pre-filtered with
`spline_filter` before interpolation (necessary for spline
interpolation of order > 1). If False, it is assumed that the input is
already filtered. Default is True.
Returns
-------
return_value : ndarray or None
The zoomed input. If `output` is given as a parameter, None is
returned.
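    Examples
    --------
    A minimal example; with `order=0` (nearest neighbour) each value of this
    small input is simply repeated:
    >>> a = np.arange(4.).reshape((2, 2))
    >>> sp.ndimage.zoom(a, 2, order=0)
    array([[ 0., 0., 1., 1.],
           [ 0., 0., 1., 1.],
           [ 2., 2., 3., 3.],
           [ 2., 2., 3., 3.]])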
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if input.ndim < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
zoom = _ni_support._normalize_sequence(zoom, input.ndim)
output_shape = tuple([int(ii * jj) for ii, jj in zip(input.shape, zoom)])
zoom = (numpy.array(input.shape)-1)/(numpy.array(output_shape,float)-1)
output, return_value = _ni_support._get_output(output, input,
shape=output_shape)
zoom = numpy.asarray(zoom, dtype = numpy.float64)
zoom = numpy.ascontiguousarray(zoom)
_nd_image.zoom_shift(filtered, zoom, None, output, order, mode, cval)
return return_value
def _minmax(coor, minc, maxc):
if coor[0] < minc[0]:
minc[0] = coor[0]
if coor[0] > maxc[0]:
maxc[0] = coor[0]
if coor[1] < minc[1]:
minc[1] = coor[1]
if coor[1] > maxc[1]:
maxc[1] = coor[1]
return minc, maxc
def rotate(input, angle, axes=(1, 0), reshape=True,
output=None, order=3,
mode='constant', cval=0.0, prefilter=True):
"""
Rotate an array.
The array is rotated in the plane defined by the two axes given by the
`axes` parameter using spline interpolation of the requested order.
Parameters
----------
input : ndarray
The input array.
angle : float
The rotation angle in degrees.
axes : tuple of 2 ints, optional
The two axes that define the plane of rotation. Default is the first
two axes.
reshape : bool, optional
If `reshape` is true, the output shape is adapted so that the input
array is contained completely in the output. Default is True.
output : ndarray or dtype, optional
The array in which to place the output, or the dtype of the returned
array.
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
mode : str, optional
Points outside the boundaries of the input are filled according
to the given mode ('constant', 'nearest', 'reflect' or 'wrap').
Default is 'constant'.
cval : scalar, optional
Value used for points outside the boundaries of the input if
``mode='constant'``. Default is 0.0
prefilter : bool, optional
The parameter prefilter determines if the input is pre-filtered with
`spline_filter` before interpolation (necessary for spline
interpolation of order > 1). If False, it is assumed that the input is
already filtered. Default is True.
Returns
-------
return_value : ndarray or None
The rotated input. If `output` is given as a parameter, None is
returned.
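    Examples
    --------
    A shape-only sketch showing the effect of `reshape`:
    >>> a = np.arange(12.).reshape((4, 3))
    >>> sp.ndimage.rotate(a, 90).shape
    (3, 4)
    >>> sp.ndimage.rotate(a, 90, reshape=False).shape
    (4, 3)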
"""
input = numpy.asarray(input)
axes = list(axes)
rank = input.ndim
if axes[0] < 0:
axes[0] += rank
if axes[1] < 0:
axes[1] += rank
if axes[0] < 0 or axes[1] < 0 or axes[0] > rank or axes[1] > rank:
raise RuntimeError('invalid rotation plane specified')
if axes[0] > axes[1]:
axes = axes[1], axes[0]
angle = numpy.pi / 180 * angle
m11 = math.cos(angle)
m12 = math.sin(angle)
m21 = -math.sin(angle)
m22 = math.cos(angle)
matrix = numpy.array([[m11, m12],
[m21, m22]], dtype = numpy.float64)
iy = input.shape[axes[0]]
ix = input.shape[axes[1]]
if reshape:
mtrx = numpy.array([[ m11, -m21],
[-m12, m22]], dtype = numpy.float64)
minc = [0, 0]
maxc = [0, 0]
coor = numpy.dot(mtrx, [0, ix])
minc, maxc = _minmax(coor, minc, maxc)
coor = numpy.dot(mtrx, [iy, 0])
minc, maxc = _minmax(coor, minc, maxc)
coor = numpy.dot(mtrx, [iy, ix])
minc, maxc = _minmax(coor, minc, maxc)
oy = int(maxc[0] - minc[0] + 0.5)
ox = int(maxc[1] - minc[1] + 0.5)
else:
oy = input.shape[axes[0]]
ox = input.shape[axes[1]]
offset = numpy.zeros((2,), dtype = numpy.float64)
offset[0] = float(oy) / 2.0 - 0.5
offset[1] = float(ox) / 2.0 - 0.5
offset = numpy.dot(matrix, offset)
tmp = numpy.zeros((2,), dtype = numpy.float64)
tmp[0] = float(iy) / 2.0 - 0.5
tmp[1] = float(ix) / 2.0 - 0.5
offset = tmp - offset
output_shape = list(input.shape)
output_shape[axes[0]] = oy
output_shape[axes[1]] = ox
output_shape = tuple(output_shape)
output, return_value = _ni_support._get_output(output, input,
shape=output_shape)
if input.ndim <= 2:
affine_transform(input, matrix, offset, output_shape, output,
order, mode, cval, prefilter)
else:
coordinates = []
size = numpy.product(input.shape,axis=0)
size //= input.shape[axes[0]]
size //= input.shape[axes[1]]
for ii in range(input.ndim):
if ii not in axes:
coordinates.append(0)
else:
coordinates.append(slice(None, None, None))
iter_axes = range(input.ndim)
iter_axes.reverse()
iter_axes.remove(axes[0])
iter_axes.remove(axes[1])
os = (output_shape[axes[0]], output_shape[axes[1]])
for ii in range(size):
ia = input[tuple(coordinates)]
oa = output[tuple(coordinates)]
affine_transform(ia, matrix, offset, os, oa, order, mode,
cval, prefilter)
for jj in iter_axes:
if coordinates[jj] < input.shape[jj] - 1:
coordinates[jj] += 1
break
else:
coordinates[jj] = 0
return return_value
| gpl-3.0 | 1,072,217,115,633,782,400 | 4,165,901,245,474,484,700 | 37.860395 | 79 | 0.625913 | false |
tmerrick1/spack | lib/spack/external/jinja2/bccache.py | 84 | 12794 | # -*- coding: utf-8 -*-
"""
jinja2.bccache
~~~~~~~~~~~~~~
This module implements the bytecode cache system Jinja is optionally
using. This is useful if you have very complex template situations and
    the compilation of all those templates slows down your application too
much.
Situations where this is useful are often forking web applications that
are initialized on the first request.
:copyright: (c) 2017 by the Jinja Team.
:license: BSD.
"""
from os import path, listdir
import os
import sys
import stat
import errno
import marshal
import tempfile
import fnmatch
from hashlib import sha1
from jinja2.utils import open_if_exists
from jinja2._compat import BytesIO, pickle, PY2, text_type
# marshal works better on 3.x, one hack less required
if not PY2:
marshal_dump = marshal.dump
marshal_load = marshal.load
else:
def marshal_dump(code, f):
if isinstance(f, file):
marshal.dump(code, f)
else:
f.write(marshal.dumps(code))
def marshal_load(f):
if isinstance(f, file):
return marshal.load(f)
return marshal.loads(f.read())
bc_version = 3
# magic version used to only change with new jinja versions. With 2.6
# we change this to also take Python version changes into account. The
# reason for this is that Python tends to segfault if fed earlier bytecode
# versions because someone thought it would be a good idea to reuse opcodes
# or make Python incompatible with earlier versions.
bc_magic = 'j2'.encode('ascii') + \
pickle.dumps(bc_version, 2) + \
pickle.dumps((sys.version_info[0] << 24) | sys.version_info[1])
class Bucket(object):
"""Buckets are used to store the bytecode for one template. It's created
and initialized by the bytecode cache and passed to the loading functions.
The buckets get an internal checksum from the cache assigned and use this
to automatically reject outdated cache material. Individual bytecode
cache subclasses don't have to care about cache invalidation.
"""
def __init__(self, environment, key, checksum):
self.environment = environment
self.key = key
self.checksum = checksum
self.reset()
def reset(self):
"""Resets the bucket (unloads the bytecode)."""
self.code = None
def load_bytecode(self, f):
"""Loads bytecode from a file or file like object."""
# make sure the magic header is correct
magic = f.read(len(bc_magic))
if magic != bc_magic:
self.reset()
return
# the source code of the file changed, we need to reload
checksum = pickle.load(f)
if self.checksum != checksum:
self.reset()
return
# if marshal_load fails then we need to reload
try:
self.code = marshal_load(f)
except (EOFError, ValueError, TypeError):
self.reset()
return
def write_bytecode(self, f):
"""Dump the bytecode into the file or file like object passed."""
if self.code is None:
raise TypeError('can\'t write empty bucket')
f.write(bc_magic)
pickle.dump(self.checksum, f, 2)
marshal_dump(self.code, f)
def bytecode_from_string(self, string):
"""Load bytecode from a string."""
self.load_bytecode(BytesIO(string))
def bytecode_to_string(self):
"""Return the bytecode as string."""
out = BytesIO()
self.write_bytecode(out)
return out.getvalue()
class BytecodeCache(object):
"""To implement your own bytecode cache you have to subclass this class
and override :meth:`load_bytecode` and :meth:`dump_bytecode`. Both of
these methods are passed a :class:`~jinja2.bccache.Bucket`.
A very basic bytecode cache that saves the bytecode on the file system::
from os import path
class MyCache(BytecodeCache):
def __init__(self, directory):
self.directory = directory
def load_bytecode(self, bucket):
filename = path.join(self.directory, bucket.key)
if path.exists(filename):
with open(filename, 'rb') as f:
bucket.load_bytecode(f)
def dump_bytecode(self, bucket):
filename = path.join(self.directory, bucket.key)
with open(filename, 'wb') as f:
bucket.write_bytecode(f)
A more advanced version of a filesystem based bytecode cache is part of
Jinja2.
"""
def load_bytecode(self, bucket):
"""Subclasses have to override this method to load bytecode into a
bucket. If they are not able to find code in the cache for the
bucket, it must not do anything.
"""
raise NotImplementedError()
def dump_bytecode(self, bucket):
"""Subclasses have to override this method to write the bytecode
        from a bucket back to the cache. If it is unable to do so it must not
fail silently but raise an exception.
"""
raise NotImplementedError()
def clear(self):
"""Clears the cache. This method is not used by Jinja2 but should be
implemented to allow applications to clear the bytecode cache used
by a particular environment.
"""
def get_cache_key(self, name, filename=None):
"""Returns the unique hash key for this template name."""
hash = sha1(name.encode('utf-8'))
if filename is not None:
filename = '|' + filename
if isinstance(filename, text_type):
filename = filename.encode('utf-8')
hash.update(filename)
return hash.hexdigest()
def get_source_checksum(self, source):
"""Returns a checksum for the source."""
return sha1(source.encode('utf-8')).hexdigest()
def get_bucket(self, environment, name, filename, source):
"""Return a cache bucket for the given template. All arguments are
mandatory but filename may be `None`.
"""
key = self.get_cache_key(name, filename)
checksum = self.get_source_checksum(source)
bucket = Bucket(environment, key, checksum)
self.load_bytecode(bucket)
return bucket
def set_bucket(self, bucket):
"""Put the bucket into the cache."""
self.dump_bytecode(bucket)
class FileSystemBytecodeCache(BytecodeCache):
"""A bytecode cache that stores bytecode on the filesystem. It accepts
two arguments: The directory where the cache items are stored and a
pattern string that is used to build the filename.
If no directory is specified a default cache directory is selected. On
Windows the user's temp directory is used, on UNIX systems a directory
is created for the user in the system temp directory.
The pattern can be used to have multiple separate caches operate on the
same directory. The default pattern is ``'__jinja2_%s.cache'``. ``%s``
is replaced with the cache key.
>>> bcc = FileSystemBytecodeCache('/tmp/jinja_cache', '%s.cache')
This bytecode cache supports clearing of the cache using the clear method.
"""
def __init__(self, directory=None, pattern='__jinja2_%s.cache'):
if directory is None:
directory = self._get_default_cache_dir()
self.directory = directory
self.pattern = pattern
def _get_default_cache_dir(self):
def _unsafe_dir():
raise RuntimeError('Cannot determine safe temp directory. You '
'need to explicitly provide one.')
tmpdir = tempfile.gettempdir()
        # On Windows the temporary directory is user-specific unless
# explicitly forced otherwise. We can just use that.
if os.name == 'nt':
return tmpdir
if not hasattr(os, 'getuid'):
_unsafe_dir()
dirname = '_jinja2-cache-%d' % os.getuid()
actual_dir = os.path.join(tmpdir, dirname)
try:
os.mkdir(actual_dir, stat.S_IRWXU)
except OSError as e:
if e.errno != errno.EEXIST:
raise
try:
os.chmod(actual_dir, stat.S_IRWXU)
actual_dir_stat = os.lstat(actual_dir)
if actual_dir_stat.st_uid != os.getuid() \
or not stat.S_ISDIR(actual_dir_stat.st_mode) \
or stat.S_IMODE(actual_dir_stat.st_mode) != stat.S_IRWXU:
_unsafe_dir()
except OSError as e:
if e.errno != errno.EEXIST:
raise
actual_dir_stat = os.lstat(actual_dir)
if actual_dir_stat.st_uid != os.getuid() \
or not stat.S_ISDIR(actual_dir_stat.st_mode) \
or stat.S_IMODE(actual_dir_stat.st_mode) != stat.S_IRWXU:
_unsafe_dir()
return actual_dir
def _get_cache_filename(self, bucket):
return path.join(self.directory, self.pattern % bucket.key)
def load_bytecode(self, bucket):
f = open_if_exists(self._get_cache_filename(bucket), 'rb')
if f is not None:
try:
bucket.load_bytecode(f)
finally:
f.close()
def dump_bytecode(self, bucket):
f = open(self._get_cache_filename(bucket), 'wb')
try:
bucket.write_bytecode(f)
finally:
f.close()
def clear(self):
# imported lazily here because google app-engine doesn't support
# write access on the file system and the function does not exist
# normally.
from os import remove
files = fnmatch.filter(listdir(self.directory), self.pattern % '*')
for filename in files:
try:
remove(path.join(self.directory, filename))
except OSError:
pass
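# Editor's illustrative sketch (not part of Jinja2): the cache object is handed
# to an Environment through its ``bytecode_cache`` argument. The template
# directory, cache directory and template name below are assumed examples.
def _example_filesystem_cache_usage():
    from jinja2 import Environment, FileSystemLoader
    env = Environment(
        loader=FileSystemLoader('templates'),
        bytecode_cache=FileSystemBytecodeCache('/tmp/jinja_cache', '%s.cache'),
    )
    # Compiled bytecode is written to /tmp/jinja_cache on the first render and
    # reused by later Environment instances pointing at the same directory.
    return env.get_template('index.html').render()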
class MemcachedBytecodeCache(BytecodeCache):
"""This class implements a bytecode cache that uses a memcache cache for
storing the information. It does not enforce a specific memcache library
(tummy's memcache or cmemcache) but will accept any class that provides
the minimal interface required.
Libraries compatible with this class:
- `werkzeug <http://werkzeug.pocoo.org/>`_.contrib.cache
- `python-memcached <https://www.tummy.com/Community/software/python-memcached/>`_
- `cmemcache <http://gijsbert.org/cmemcache/>`_
(Unfortunately the django cache interface is not compatible because it
does not support storing binary data, only unicode. You can however pass
the underlying cache client to the bytecode cache which is available
as `django.core.cache.cache._client`.)
The minimal interface for the client passed to the constructor is this:
.. class:: MinimalClientInterface
.. method:: set(key, value[, timeout])
Stores the bytecode in the cache. `value` is a string and
`timeout` the timeout of the key. If timeout is not provided
a default timeout or no timeout should be assumed, if it's
provided it's an integer with the number of seconds the cache
item should exist.
.. method:: get(key)
Returns the value for the cache key. If the item does not
exist in the cache the return value must be `None`.
The other arguments to the constructor are the prefix for all keys that
is added before the actual cache key and the timeout for the bytecode in
the cache system. We recommend a high (or no) timeout.
This bytecode cache does not support clearing of used items in the cache.
The clear method is a no-operation function.
.. versionadded:: 2.7
Added support for ignoring memcache errors through the
`ignore_memcache_errors` parameter.
"""
def __init__(self, client, prefix='jinja2/bytecode/', timeout=None,
ignore_memcache_errors=True):
self.client = client
self.prefix = prefix
self.timeout = timeout
self.ignore_memcache_errors = ignore_memcache_errors
def load_bytecode(self, bucket):
try:
code = self.client.get(self.prefix + bucket.key)
except Exception:
if not self.ignore_memcache_errors:
raise
code = None
if code is not None:
bucket.bytecode_from_string(code)
def dump_bytecode(self, bucket):
args = (self.prefix + bucket.key, bucket.bytecode_to_string())
if self.timeout is not None:
args += (self.timeout,)
try:
self.client.set(*args)
except Exception:
if not self.ignore_memcache_errors:
raise
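# Editor's illustrative sketch (not part of Jinja2): any object exposing the
# minimal get()/set() interface documented above can serve as the ``client``.
# The stub below simply keeps values in a dict; a real deployment would pass a
# memcached or werkzeug cache client instead.
class _ExampleMinimalClient(object):
    """Hypothetical stand-in for a memcache client, for illustration only."""

    def __init__(self):
        self._data = {}

    def get(self, key):
        return self._data.get(key)

    def set(self, key, value, timeout=None):
        self._data[key] = value

# e.g. MemcachedBytecodeCache(_ExampleMinimalClient(), prefix='jinja2/bytecode/')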
jaimahajan1997/sympy | sympy/combinatorics/testutil.py | 33 | 11004 |
from __future__ import print_function, division
from sympy.core.compatibility import range
from sympy.combinatorics.util import _distribute_gens_by_base
from sympy.combinatorics import Permutation
rmul = Permutation.rmul
def _cmp_perm_lists(first, second):
"""
Compare two lists of permutations as sets.
This is used for testing purposes. Since the array form of a
permutation is currently a list, Permutation is not hashable
and cannot be put into a set.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> from sympy.combinatorics.testutil import _cmp_perm_lists
>>> a = Permutation([0, 2, 3, 4, 1])
>>> b = Permutation([1, 2, 0, 4, 3])
>>> c = Permutation([3, 4, 0, 1, 2])
>>> ls1 = [a, b, c]
>>> ls2 = [b, c, a]
>>> _cmp_perm_lists(ls1, ls2)
True
"""
return {tuple(a) for a in first} == \
{tuple(a) for a in second}
def _naive_list_centralizer(self, other, af=False):
from sympy.combinatorics.perm_groups import PermutationGroup
"""
Return a list of elements for the centralizer of a subgroup/set/element.
This is a brute force implementation that goes over all elements of the
group and checks for membership in the centralizer. It is used to
test ``.centralizer()`` from ``sympy.combinatorics.perm_groups``.
Examples
========
>>> from sympy.combinatorics.testutil import _naive_list_centralizer
>>> from sympy.combinatorics.named_groups import DihedralGroup
>>> D = DihedralGroup(4)
>>> _naive_list_centralizer(D, D)
[Permutation([0, 1, 2, 3]), Permutation([2, 3, 0, 1])]
See Also
========
sympy.combinatorics.perm_groups.centralizer
"""
from sympy.combinatorics.permutations import _af_commutes_with
if hasattr(other, 'generators'):
elements = list(self.generate_dimino(af=True))
gens = [x._array_form for x in other.generators]
commutes_with_gens = lambda x: all(_af_commutes_with(x, gen) for gen in gens)
centralizer_list = []
if not af:
for element in elements:
if commutes_with_gens(element):
centralizer_list.append(Permutation._af_new(element))
else:
for element in elements:
if commutes_with_gens(element):
centralizer_list.append(element)
return centralizer_list
elif hasattr(other, 'getitem'):
return _naive_list_centralizer(self, PermutationGroup(other), af)
elif hasattr(other, 'array_form'):
return _naive_list_centralizer(self, PermutationGroup([other]), af)
def _verify_bsgs(group, base, gens):
"""
Verify the correctness of a base and strong generating set.
This is a naive implementation using the definition of a base and a strong
generating set relative to it. There are other procedures for
verifying a base and strong generating set, but this one will
serve for more robust testing.
Examples
========
>>> from sympy.combinatorics.named_groups import AlternatingGroup
>>> from sympy.combinatorics.testutil import _verify_bsgs
>>> A = AlternatingGroup(4)
>>> A.schreier_sims()
>>> _verify_bsgs(A, A.base, A.strong_gens)
True
See Also
========
sympy.combinatorics.perm_groups.PermutationGroup.schreier_sims
"""
from sympy.combinatorics.perm_groups import PermutationGroup
strong_gens_distr = _distribute_gens_by_base(base, gens)
current_stabilizer = group
for i in range(len(base)):
candidate = PermutationGroup(strong_gens_distr[i])
if current_stabilizer.order() != candidate.order():
return False
current_stabilizer = current_stabilizer.stabilizer(base[i])
if current_stabilizer.order() != 1:
return False
return True
def _verify_centralizer(group, arg, centr=None):
"""
Verify the centralizer of a group/set/element inside another group.
This is used for testing ``.centralizer()`` from
``sympy.combinatorics.perm_groups``
Examples
========
>>> from sympy.combinatorics.named_groups import (SymmetricGroup,
... AlternatingGroup)
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> from sympy.combinatorics.permutations import Permutation
>>> from sympy.combinatorics.testutil import _verify_centralizer
>>> S = SymmetricGroup(5)
>>> A = AlternatingGroup(5)
>>> centr = PermutationGroup([Permutation([0, 1, 2, 3, 4])])
>>> _verify_centralizer(S, A, centr)
True
See Also
========
_naive_list_centralizer,
sympy.combinatorics.perm_groups.PermutationGroup.centralizer,
_cmp_perm_lists
"""
if centr is None:
centr = group.centralizer(arg)
centr_list = list(centr.generate_dimino(af=True))
centr_list_naive = _naive_list_centralizer(group, arg, af=True)
return _cmp_perm_lists(centr_list, centr_list_naive)
def _verify_normal_closure(group, arg, closure=None):
from sympy.combinatorics.perm_groups import PermutationGroup
"""
Verify the normal closure of a subgroup/subset/element in a group.
This is used to test
sympy.combinatorics.perm_groups.PermutationGroup.normal_closure
Examples
========
>>> from sympy.combinatorics.named_groups import (SymmetricGroup,
... AlternatingGroup)
>>> from sympy.combinatorics.testutil import _verify_normal_closure
>>> S = SymmetricGroup(3)
>>> A = AlternatingGroup(3)
>>> _verify_normal_closure(S, A, closure=A)
True
See Also
========
sympy.combinatorics.perm_groups.PermutationGroup.normal_closure
"""
if closure is None:
closure = group.normal_closure(arg)
conjugates = set()
if hasattr(arg, 'generators'):
subgr_gens = arg.generators
elif hasattr(arg, '__getitem__'):
subgr_gens = arg
elif hasattr(arg, 'array_form'):
subgr_gens = [arg]
for el in group.generate_dimino():
for gen in subgr_gens:
conjugates.add(gen ^ el)
naive_closure = PermutationGroup(list(conjugates))
return closure.is_subgroup(naive_closure)
def canonicalize_naive(g, dummies, sym, *v):
"""
Canonicalize tensor formed by tensors of the different types
g permutation representing the tensor
dummies list of dummy indices
    sym   symmetry of the metric
v is a list of (base_i, gens_i, n_i, sym_i) for tensors of type `i`
base_i, gens_i BSGS for tensors of this type
    n_i   number of tensors of type `i`
sym_i symmetry under exchange of two component tensors of type `i`
None no symmetry
0 commuting
1 anticommuting
Return 0 if the tensor is zero, else return the array form of
the permutation representing the canonical form of the tensor.
Examples
========
>>> from sympy.combinatorics.testutil import canonicalize_naive
>>> from sympy.combinatorics.tensor_can import get_symmetric_group_sgs
>>> from sympy.combinatorics import Permutation, PermutationGroup
>>> g = Permutation([1, 3, 2, 0, 4, 5])
>>> base2, gens2 = get_symmetric_group_sgs(2)
>>> canonicalize_naive(g, [2, 3], 0, (base2, gens2, 2, 0))
[0, 2, 1, 3, 4, 5]
"""
from sympy.combinatorics.perm_groups import PermutationGroup
from sympy.combinatorics.tensor_can import gens_products, dummy_sgs
from sympy.combinatorics.permutations import Permutation, _af_rmul
v1 = []
for i in range(len(v)):
base_i, gens_i, n_i, sym_i = v[i]
v1.append((base_i, gens_i, [[]]*n_i, sym_i))
size, sbase, sgens = gens_products(*v1)
dgens = dummy_sgs(dummies, sym, size-2)
if isinstance(sym, int):
num_types = 1
dummies = [dummies]
sym = [sym]
else:
num_types = len(sym)
dgens = []
for i in range(num_types):
dgens.extend(dummy_sgs(dummies[i], sym[i], size - 2))
S = PermutationGroup(sgens)
D = PermutationGroup([Permutation(x) for x in dgens])
dlist = list(D.generate(af=True))
g = g.array_form
st = set()
for s in S.generate(af=True):
h = _af_rmul(g, s)
for d in dlist:
q = tuple(_af_rmul(d, h))
st.add(q)
a = list(st)
a.sort()
prev = (0,)*size
for h in a:
if h[:-2] == prev[:-2]:
if h[-1] != prev[-1]:
return 0
prev = h
return list(a[0])
def graph_certificate(gr):
"""
Return a certificate for the graph
gr adjacency list
The graph is assumed to be unoriented and without
external lines.
Associate to each vertex of the graph a symmetric tensor with
number of indices equal to the degree of the vertex; indices
are contracted when they correspond to the same line of the graph.
The canonical form of the tensor gives a certificate for the graph.
This is not an efficient algorithm to get the certificate of a graph.
Examples
========
>>> from sympy.combinatorics.testutil import graph_certificate
>>> gr1 = {0:[1, 2, 3, 5], 1:[0, 2, 4], 2:[0, 1, 3, 4], 3:[0, 2, 4], 4:[1, 2, 3, 5], 5:[0, 4]}
>>> gr2 = {0:[1, 5], 1:[0, 2, 3, 4], 2:[1, 3, 5], 3:[1, 2, 4, 5], 4:[1, 3, 5], 5:[0, 2, 3, 4]}
>>> c1 = graph_certificate(gr1)
>>> c2 = graph_certificate(gr2)
>>> c1
[0, 2, 4, 6, 1, 8, 10, 12, 3, 14, 16, 18, 5, 9, 15, 7, 11, 17, 13, 19, 20, 21]
>>> c1 == c2
True
"""
from sympy.combinatorics.permutations import _af_invert
from sympy.combinatorics.tensor_can import get_symmetric_group_sgs, canonicalize
items = list(gr.items())
items.sort(key=lambda x: len(x[1]), reverse=True)
pvert = [x[0] for x in items]
pvert = _af_invert(pvert)
# the indices of the tensor are twice the number of lines of the graph
num_indices = 0
for v, neigh in items:
num_indices += len(neigh)
# associate to each vertex its indices; for each line
# between two vertices assign the
# even index to the vertex which comes first in items,
# the odd index to the other vertex
vertices = [[] for i in items]
i = 0
for v, neigh in items:
for v2 in neigh:
if pvert[v] < pvert[v2]:
vertices[pvert[v]].append(i)
vertices[pvert[v2]].append(i+1)
i += 2
g = []
for v in vertices:
g.extend(v)
assert len(g) == num_indices
g += [num_indices, num_indices + 1]
size = num_indices + 2
assert sorted(g) == list(range(size))
g = Permutation(g)
vlen = [0]*(len(vertices[0])+1)
for neigh in vertices:
vlen[len(neigh)] += 1
v = []
for i in range(len(vlen)):
n = vlen[i]
if n:
base, gens = get_symmetric_group_sgs(i)
v.append((base, gens, n, 0))
v.reverse()
dummies = list(range(num_indices))
can = canonicalize(g, dummies, 0, *v)
return can
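# Editor's illustrative sketch (not part of the original test utilities):
# certificates of non-isomorphic graphs differ; here a 4-cycle is compared
# with the complete graph K4 (both have minimum degree >= 2).
def _example_graph_certificate_differs():
    c4 = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2]}
    k4 = {0: [1, 2, 3], 1: [0, 2, 3], 2: [0, 1, 3], 3: [0, 1, 2]}
    # The two graphs have different edge counts, so their certificates differ.
    return graph_certificate(c4) != graph_certificate(k4)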
hynnet/openwrt-mt7620 | staging_dir/target-mipsel_r2_uClibc-0.9.33.2/usr/lib/python2.7/ctypes/macholib/dyld.py | 253 | 5341 |
######################################################################
# This file should be kept compatible with Python 2.3, see PEP 291. #
######################################################################
"""
dyld emulation
"""
import os
from framework import framework_info
from dylib import dylib_info
from itertools import *
__all__ = [
'dyld_find', 'framework_find',
'framework_info', 'dylib_info',
]
# These are the defaults as per man dyld(1)
#
DEFAULT_FRAMEWORK_FALLBACK = [
os.path.expanduser("~/Library/Frameworks"),
"/Library/Frameworks",
"/Network/Library/Frameworks",
"/System/Library/Frameworks",
]
DEFAULT_LIBRARY_FALLBACK = [
os.path.expanduser("~/lib"),
"/usr/local/lib",
"/lib",
"/usr/lib",
]
def ensure_utf8(s):
"""Not all of PyObjC and Python understand unicode paths very well yet"""
if isinstance(s, unicode):
return s.encode('utf8')
return s
def dyld_env(env, var):
if env is None:
env = os.environ
rval = env.get(var)
if rval is None:
return []
return rval.split(':')
def dyld_image_suffix(env=None):
if env is None:
env = os.environ
return env.get('DYLD_IMAGE_SUFFIX')
def dyld_framework_path(env=None):
return dyld_env(env, 'DYLD_FRAMEWORK_PATH')
def dyld_library_path(env=None):
return dyld_env(env, 'DYLD_LIBRARY_PATH')
def dyld_fallback_framework_path(env=None):
return dyld_env(env, 'DYLD_FALLBACK_FRAMEWORK_PATH')
def dyld_fallback_library_path(env=None):
return dyld_env(env, 'DYLD_FALLBACK_LIBRARY_PATH')
def dyld_image_suffix_search(iterator, env=None):
"""For a potential path iterator, add DYLD_IMAGE_SUFFIX semantics"""
suffix = dyld_image_suffix(env)
if suffix is None:
return iterator
def _inject(iterator=iterator, suffix=suffix):
for path in iterator:
if path.endswith('.dylib'):
yield path[:-len('.dylib')] + suffix + '.dylib'
else:
yield path + suffix
yield path
return _inject()
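# Editor's illustrative sketch (not part of the original module): with
# DYLD_IMAGE_SUFFIX set, every candidate path is first tried with the suffix
# spliced in before the '.dylib' extension, then as given.
def _example_image_suffix_search():
    env = {'DYLD_IMAGE_SUFFIX': '_debug'}
    candidates = ['/usr/lib/libSystem.dylib']
    return list(dyld_image_suffix_search(iter(candidates), env))
    # expected: ['/usr/lib/libSystem_debug.dylib', '/usr/lib/libSystem.dylib']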
def dyld_override_search(name, env=None):
# If DYLD_FRAMEWORK_PATH is set and this dylib_name is a
# framework name, use the first file that exists in the framework
# path if any. If there is none go on to search the DYLD_LIBRARY_PATH
# if any.
framework = framework_info(name)
if framework is not None:
for path in dyld_framework_path(env):
yield os.path.join(path, framework['name'])
# If DYLD_LIBRARY_PATH is set then use the first file that exists
# in the path. If none use the original name.
for path in dyld_library_path(env):
yield os.path.join(path, os.path.basename(name))
def dyld_executable_path_search(name, executable_path=None):
# If we haven't done any searching and found a library and the
# dylib_name starts with "@executable_path/" then construct the
# library name.
if name.startswith('@executable_path/') and executable_path is not None:
yield os.path.join(executable_path, name[len('@executable_path/'):])
def dyld_default_search(name, env=None):
yield name
framework = framework_info(name)
if framework is not None:
fallback_framework_path = dyld_fallback_framework_path(env)
for path in fallback_framework_path:
yield os.path.join(path, framework['name'])
fallback_library_path = dyld_fallback_library_path(env)
for path in fallback_library_path:
yield os.path.join(path, os.path.basename(name))
if framework is not None and not fallback_framework_path:
for path in DEFAULT_FRAMEWORK_FALLBACK:
yield os.path.join(path, framework['name'])
if not fallback_library_path:
for path in DEFAULT_LIBRARY_FALLBACK:
yield os.path.join(path, os.path.basename(name))
def dyld_find(name, executable_path=None, env=None):
"""
Find a library or framework using dyld semantics
"""
name = ensure_utf8(name)
executable_path = ensure_utf8(executable_path)
for path in dyld_image_suffix_search(chain(
dyld_override_search(name, env),
dyld_executable_path_search(name, executable_path),
dyld_default_search(name, env),
), env):
if os.path.isfile(path):
return path
raise ValueError("dylib %s could not be found" % (name,))
def framework_find(fn, executable_path=None, env=None):
"""
Find a framework using dyld semantics in a very loose manner.
Will take input such as:
Python
Python.framework
Python.framework/Versions/Current
"""
try:
return dyld_find(fn, executable_path=executable_path, env=env)
except ValueError, e:
pass
fmwk_index = fn.rfind('.framework')
if fmwk_index == -1:
fmwk_index = len(fn)
fn += '.framework'
fn = os.path.join(fn, os.path.basename(fn[:fmwk_index]))
try:
return dyld_find(fn, executable_path=executable_path, env=env)
except ValueError:
raise e
def test_dyld_find():
env = {}
assert dyld_find('libSystem.dylib') == '/usr/lib/libSystem.dylib'
assert dyld_find('System.framework/System') == '/System/Library/Frameworks/System.framework/System'
if __name__ == '__main__':
test_dyld_find()
web30s/odoo-9.0c-20160402 | hello/templates/openerp/addons/hr_contract/hr_contract.py | 44 | 5741 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import time
from openerp import SUPERUSER_ID
from openerp.osv import fields, osv
class hr_employee(osv.osv):
_name = "hr.employee"
_description = "Employee"
_inherit = "hr.employee"
def _get_latest_contract(self, cr, uid, ids, field_name, args, context=None):
res = {}
obj_contract = self.pool.get('hr.contract')
for emp in self.browse(cr, uid, ids, context=context):
contract_ids = obj_contract.search(cr, uid, [('employee_id', '=', emp.id)], order='date_start', context=context)
if contract_ids:
res[emp.id] = contract_ids[-1:][0]
else:
res[emp.id] = False
return res
def _contracts_count(self, cr, uid, ids, field_name, arg, context=None):
Contract = self.pool['hr.contract']
return {
employee_id: Contract.search_count(cr, SUPERUSER_ID, [('employee_id', '=', employee_id)], context=context)
for employee_id in ids
}
_columns = {
'manager': fields.boolean('Is a Manager'),
'medic_exam': fields.date('Medical Examination Date'),
'place_of_birth': fields.char('Place of Birth'),
'children': fields.integer('Number of Children'),
'vehicle': fields.char('Company Vehicle'),
'vehicle_distance': fields.integer('Home-Work Dist.', help="In kilometers"),
'contract_ids': fields.one2many('hr.contract', 'employee_id', 'Contracts'),
'contract_id': fields.function(_get_latest_contract, string='Current Contract', type='many2one', relation="hr.contract", help='Latest contract of the employee'),
'contracts_count': fields.function(_contracts_count, type='integer', string='Contracts'),
}
class hr_contract_type(osv.osv):
_name = 'hr.contract.type'
_description = 'Contract Type'
_order = 'sequence, id'
_columns = {
'name': fields.char('Contract Type', required=True),
'sequence': fields.integer('Sequence', help="Gives the sequence when displaying a list of Contract."),
}
    _defaults = {
'sequence': 10
}
class hr_contract(osv.osv):
_name = 'hr.contract'
_description = 'Contract'
_inherit = ['mail.thread', 'ir.needaction_mixin']
_columns = {
'name': fields.char('Contract Reference', required=True),
'employee_id': fields.many2one('hr.employee', "Employee", required=True),
'department_id': fields.many2one('hr.department', string="Department"),
'type_id': fields.many2one('hr.contract.type', "Contract Type", required=True),
'job_id': fields.many2one('hr.job', 'Job Title'),
'date_start': fields.date('Start Date', required=True),
'date_end': fields.date('End Date'),
'trial_date_start': fields.date('Trial Start Date'),
'trial_date_end': fields.date('Trial End Date'),
'working_hours': fields.many2one('resource.calendar', 'Working Schedule'),
'wage': fields.float('Wage', digits=(16, 2), required=True, help="Basic Salary of the employee"),
'advantages': fields.text('Advantages'),
'notes': fields.text('Notes'),
'permit_no': fields.char('Work Permit No', required=False, readonly=False),
'visa_no': fields.char('Visa No', required=False, readonly=False),
'visa_expire': fields.date('Visa Expire Date'),
'state': fields.selection(
[('draft', 'New'), ('open', 'Running'), ('pending', 'To Renew'), ('close', 'Expired')],
string='Status', track_visibility='onchange',
help='Status of the contract'),
}
def _get_type(self, cr, uid, context=None):
type_ids = self.pool.get('hr.contract.type').search(cr, uid, [], limit=1)
return type_ids and type_ids[0] or False
_defaults = {
'date_start': lambda *a: time.strftime("%Y-%m-%d"),
'type_id': _get_type,
'state': 'draft',
}
def onchange_employee_id(self, cr, uid, ids, employee_id, context=None):
if not employee_id:
return {'value': {'job_id': False, 'department_id': False}}
emp_obj = self.pool.get('hr.employee').browse(cr, uid, employee_id, context=context)
job_id = dept_id = False
if emp_obj.job_id:
job_id = emp_obj.job_id.id
if emp_obj.department_id:
dept_id = emp_obj.department_id.id
return {'value': {'job_id': job_id, 'department_id': dept_id}}
def _check_dates(self, cr, uid, ids, context=None):
for contract in self.read(cr, uid, ids, ['date_start', 'date_end'], context=context):
if contract['date_start'] and contract['date_end'] and contract['date_start'] > contract['date_end']:
return False
return True
_constraints = [
(_check_dates, 'Error! Contract start-date must be less than contract end-date.', ['date_start', 'date_end'])
]
def set_as_pending(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'pending'}, context=context)
def set_as_close(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'close'}, context=context)
def _track_subtype(self, cr, uid, ids, init_values, context=None):
record = self.browse(cr, uid, ids[0], context=context)
if 'state' in init_values and record.state == 'pending':
return 'hr_contract.mt_contract_pending'
elif 'state' in init_values and record.state == 'close':
return 'hr_contract.mt_contract_close'
return super(hr_contract, self)._track_subtype(cr, uid, ids, init_values, context=context)
mhbu50/erpnext | erpnext/patches/v12_0/rename_lost_reason_detail.py | 3 | 1325 |
from __future__ import unicode_literals
import frappe
def execute():
if frappe.db.exists("DocType", "Lost Reason Detail"):
frappe.reload_doc("crm", "doctype", "opportunity_lost_reason")
frappe.reload_doc("crm", "doctype", "opportunity_lost_reason_detail")
frappe.reload_doc("setup", "doctype", "quotation_lost_reason_detail")
frappe.db.sql("""INSERT INTO `tabOpportunity Lost Reason Detail` SELECT * FROM `tabLost Reason Detail` WHERE `parenttype` = 'Opportunity'""")
frappe.db.sql("""INSERT INTO `tabQuotation Lost Reason Detail` SELECT * FROM `tabLost Reason Detail` WHERE `parenttype` = 'Quotation'""")
frappe.db.sql("""INSERT INTO `tabQuotation Lost Reason` (`name`, `creation`, `modified`, `modified_by`, `owner`, `docstatus`, `parent`, `parentfield`, `parenttype`, `idx`, `_comments`, `_assign`, `_user_tags`, `_liked_by`, `order_lost_reason`)
SELECT o.`name`, o.`creation`, o.`modified`, o.`modified_by`, o.`owner`, o.`docstatus`, o.`parent`, o.`parentfield`, o.`parenttype`, o.`idx`, o.`_comments`, o.`_assign`, o.`_user_tags`, o.`_liked_by`, o.`lost_reason`
FROM `tabOpportunity Lost Reason` o LEFT JOIN `tabQuotation Lost Reason` q ON q.name = o.name WHERE q.name IS NULL""")
		frappe.delete_doc("DocType", "Lost Reason Detail")
msmolens/VTK | ThirdParty/Twisted/twisted/internet/_sslverify.py | 23 | 58378 |
# -*- test-case-name: twisted.test.test_sslverify -*-
# Copyright (c) 2005 Divmod, Inc.
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
from __future__ import division, absolute_import
import itertools
import warnings
from hashlib import md5
from OpenSSL import SSL, crypto, version
try:
from OpenSSL.SSL import SSL_CB_HANDSHAKE_DONE, SSL_CB_HANDSHAKE_START
except ImportError:
SSL_CB_HANDSHAKE_START = 0x10
SSL_CB_HANDSHAKE_DONE = 0x20
from twisted.python import log
def _cantSetHostnameIndication(connection, hostname):
"""
The option to set SNI is not available, so do nothing.
@param connection: the connection
@type connection: L{OpenSSL.SSL.Connection}
@param hostname: the server's host name
    @type hostname: L{bytes}
"""
def _setHostNameIndication(connection, hostname):
"""
Set the server name indication on the given client connection to the given
value.
@param connection: the connection
@type connection: L{OpenSSL.SSL.Connection}
@param hostname: the server's host name
    @type hostname: L{bytes}
"""
connection.set_tlsext_host_name(hostname)
if getattr(SSL.Connection, "set_tlsext_host_name", None) is None:
_maybeSetHostNameIndication = _cantSetHostnameIndication
else:
_maybeSetHostNameIndication = _setHostNameIndication
class SimpleVerificationError(Exception):
"""
Not a very useful verification error.
"""
def _idnaBytes(text):
"""
Convert some text typed by a human into some ASCII bytes.
This is provided to allow us to use the U{partially-broken IDNA
implementation in the standard library <http://bugs.python.org/issue17305>}
if the more-correct U{idna <https://pypi.python.org/pypi/idna>} package is
not available; C{service_identity} is somewhat stricter about this.
@param text: A domain name, hopefully.
@type text: L{unicode}
@return: The domain name's IDNA representation, encoded as bytes.
@rtype: L{bytes}
"""
try:
import idna
except ImportError:
return text.encode("idna")
else:
return idna.encode(text).encode("ascii")
def _idnaText(octets):
"""
Convert some IDNA-encoded octets into some human-readable text.
Currently only used by the tests.
@param octets: Some bytes representing a hostname.
@type octets: L{bytes}
@return: A human-readable domain name.
@rtype: L{unicode}
"""
try:
import idna
except ImportError:
return octets.decode("idna")
else:
return idna.decode(octets)
def simpleVerifyHostname(connection, hostname):
"""
Check only the common name in the certificate presented by the peer and
only for an exact match.
This is to provide I{something} in the way of hostname verification to
    users who haven't upgraded past pyOpenSSL 0.12 or installed
C{service_identity}. This check is overly strict, relies on a deprecated
TLS feature (you're supposed to ignore the commonName if the
subjectAlternativeName extensions are present, I believe), and lots of
valid certificates will fail.
    @param connection: the OpenSSL connection to verify.
@type connection: L{OpenSSL.SSL.Connection}
@param hostname: The hostname expected by the user.
@type hostname: L{unicode}
@raise twisted.internet.ssl.VerificationError: if the common name and
hostname don't match.
"""
commonName = connection.get_peer_certificate().get_subject().commonName
if commonName != hostname:
raise SimpleVerificationError(repr(commonName) + "!=" +
repr(hostname))
def _selectVerifyImplementation():
"""
U{service_identity <https://pypi.python.org/pypi/service_identity>}
requires pyOpenSSL 0.12 or better but our dependency is still back at 0.10.
Determine if pyOpenSSL has the requisite feature, and whether
C{service_identity} is installed. If so, use it. If not, use simplistic
and incorrect checking as implemented in L{simpleVerifyHostname}.
@return: 2-tuple of (C{verify_hostname}, C{VerificationError})
@rtype: L{tuple}
"""
whatsWrong = (
"Without the service_identity module and a recent enough pyOpenSSL to"
"support it, Twisted can perform only rudimentary TLS client hostname"
"verification. Many valid certificate/hostname mappings may be "
"rejected."
)
if hasattr(crypto.X509, "get_extension_count"):
try:
from service_identity import VerificationError
from service_identity.pyopenssl import verify_hostname
return verify_hostname, VerificationError
except ImportError:
warnings.warn(
"You do not have the service_identity module installed. "
"Please install it from "
"<https://pypi.python.org/pypi/service_identity>. "
+ whatsWrong,
UserWarning,
stacklevel=2
)
else:
warnings.warn(
"Your version of pyOpenSSL, {0}, is out of date. "
"Please upgrade to at least 0.12 and install service_identity "
"from <https://pypi.python.org/pypi/service_identity>. "
.format(version.__version__) + whatsWrong,
UserWarning,
stacklevel=2
)
return simpleVerifyHostname, SimpleVerificationError
verifyHostname, VerificationError = _selectVerifyImplementation()
from zope.interface import Interface, implementer
from twisted.internet.defer import Deferred
from twisted.internet.error import VerifyError, CertificateError
from twisted.internet.interfaces import (
IAcceptableCiphers, ICipher, IOpenSSLClientConnectionCreator
)
from twisted.python import reflect, util
from twisted.python.deprecate import _mutuallyExclusiveArguments
from twisted.python.compat import nativeString, networkString, unicode
from twisted.python.failure import Failure
from twisted.python.util import FancyEqMixin
def _sessionCounter(counter=itertools.count()):
"""
Private - shared between all OpenSSLCertificateOptions, counts up to
provide a unique session id for each context.
"""
return next(counter)
_x509names = {
'CN': 'commonName',
'commonName': 'commonName',
'O': 'organizationName',
'organizationName': 'organizationName',
'OU': 'organizationalUnitName',
'organizationalUnitName': 'organizationalUnitName',
'L': 'localityName',
'localityName': 'localityName',
'ST': 'stateOrProvinceName',
'stateOrProvinceName': 'stateOrProvinceName',
'C': 'countryName',
'countryName': 'countryName',
'emailAddress': 'emailAddress'}
class DistinguishedName(dict):
"""
Identify and describe an entity.
Distinguished names are used to provide a minimal amount of identifying
information about a certificate issuer or subject. They are commonly
created with one or more of the following fields::
commonName (CN)
organizationName (O)
organizationalUnitName (OU)
localityName (L)
stateOrProvinceName (ST)
countryName (C)
emailAddress
A L{DistinguishedName} should be constructed using keyword arguments whose
keys can be any of the field names above (as a native string), and the
values are either Unicode text which is encodable to ASCII, or C{bytes}
limited to the ASCII subset. Any fields passed to the constructor will be
    set as attributes, accessible using both their extended name and their
shortened acronym. The attribute values will be the ASCII-encoded
bytes. For example::
>>> dn = DistinguishedName(commonName=b'www.example.com',
    ...                            C='US')
>>> dn.C
b'US'
>>> dn.countryName
b'US'
>>> hasattr(dn, "organizationName")
False
L{DistinguishedName} instances can also be used as dictionaries; the keys
are extended name of the fields::
>>> dn.keys()
['countryName', 'commonName']
>>> dn['countryName']
b'US'
"""
__slots__ = ()
def __init__(self, **kw):
for k, v in kw.items():
setattr(self, k, v)
def _copyFrom(self, x509name):
for name in _x509names:
value = getattr(x509name, name, None)
if value is not None:
setattr(self, name, value)
def _copyInto(self, x509name):
for k, v in self.items():
setattr(x509name, k, nativeString(v))
def __repr__(self):
return '<DN %s>' % (dict.__repr__(self)[1:-1])
def __getattr__(self, attr):
try:
return self[_x509names[attr]]
except KeyError:
raise AttributeError(attr)
def __setattr__(self, attr, value):
if attr not in _x509names:
raise AttributeError("%s is not a valid OpenSSL X509 name field" % (attr,))
realAttr = _x509names[attr]
if not isinstance(value, bytes):
value = value.encode("ascii")
self[realAttr] = value
def inspect(self):
"""
Return a multi-line, human-readable representation of this DN.
@rtype: C{str}
"""
l = []
lablen = 0
def uniqueValues(mapping):
return set(mapping.values())
for k in sorted(uniqueValues(_x509names)):
label = util.nameToLabel(k)
lablen = max(len(label), lablen)
v = getattr(self, k, None)
if v is not None:
l.append((label, nativeString(v)))
lablen += 2
for n, (label, attr) in enumerate(l):
l[n] = (label.rjust(lablen)+': '+ attr)
return '\n'.join(l)
DN = DistinguishedName
class CertBase:
"""
Base class for public (certificate only) and private (certificate + key
pair) certificates.
@ivar original: The underlying OpenSSL certificate object.
@type original: L{OpenSSL.crypto.X509}
"""
def __init__(self, original):
self.original = original
def _copyName(self, suffix):
dn = DistinguishedName()
dn._copyFrom(getattr(self.original, 'get_'+suffix)())
return dn
def getSubject(self):
"""
Retrieve the subject of this certificate.
@return: A copy of the subject of this certificate.
@rtype: L{DistinguishedName}
"""
return self._copyName('subject')
def __conform__(self, interface):
"""
Convert this L{CertBase} into a provider of the given interface.
@param interface: The interface to conform to.
@type interface: L{Interface}
@return: an L{IOpenSSLTrustRoot} provider or L{NotImplemented}
@rtype: C{interface} or L{NotImplemented}
"""
if interface is IOpenSSLTrustRoot:
return OpenSSLCertificateAuthorities([self.original])
return NotImplemented
def _handleattrhelper(Class, transport, methodName):
"""
(private) Helper for L{Certificate.peerFromTransport} and
L{Certificate.hostFromTransport} which checks for incompatible handle types
and null certificates and raises the appropriate exception or returns the
appropriate certificate object.
"""
method = getattr(transport.getHandle(),
"get_%s_certificate" % (methodName,), None)
if method is None:
raise CertificateError(
"non-TLS transport %r did not have %s certificate" % (transport, methodName))
cert = method()
if cert is None:
raise CertificateError(
"TLS transport %r did not have %s certificate" % (transport, methodName))
return Class(cert)
class Certificate(CertBase):
"""
An x509 certificate.
"""
def __repr__(self):
return '<%s Subject=%s Issuer=%s>' % (self.__class__.__name__,
self.getSubject().commonName,
self.getIssuer().commonName)
def __eq__(self, other):
if isinstance(other, Certificate):
return self.dump() == other.dump()
return False
def __ne__(self, other):
return not self.__eq__(other)
def load(Class, requestData, format=crypto.FILETYPE_ASN1, args=()):
"""
Load a certificate from an ASN.1- or PEM-format string.
@rtype: C{Class}
"""
return Class(crypto.load_certificate(format, requestData), *args)
load = classmethod(load)
_load = load
def dumpPEM(self):
"""
Dump this certificate to a PEM-format data string.
@rtype: C{str}
"""
return self.dump(crypto.FILETYPE_PEM)
def loadPEM(Class, data):
"""
Load a certificate from a PEM-format data string.
@rtype: C{Class}
"""
return Class.load(data, crypto.FILETYPE_PEM)
loadPEM = classmethod(loadPEM)
def peerFromTransport(Class, transport):
"""
Get the certificate for the remote end of the given transport.
@type: L{ISystemHandle}
@rtype: C{Class}
@raise: L{CertificateError}, if the given transport does not have a peer
certificate.
"""
return _handleattrhelper(Class, transport, 'peer')
peerFromTransport = classmethod(peerFromTransport)
def hostFromTransport(Class, transport):
"""
Get the certificate for the local end of the given transport.
        @param transport: an L{ISystemHandle} provider; the transport whose
            host certificate should be retrieved.
@rtype: C{Class}
@raise: L{CertificateError}, if the given transport does not have a host
certificate.
"""
return _handleattrhelper(Class, transport, 'host')
hostFromTransport = classmethod(hostFromTransport)
def getPublicKey(self):
"""
Get the public key for this certificate.
@rtype: L{PublicKey}
"""
return PublicKey(self.original.get_pubkey())
def dump(self, format=crypto.FILETYPE_ASN1):
return crypto.dump_certificate(format, self.original)
def serialNumber(self):
"""
Retrieve the serial number of this certificate.
@rtype: C{int}
"""
return self.original.get_serial_number()
def digest(self, method='md5'):
"""
Return a digest hash of this certificate using the specified hash
algorithm.
@param method: One of C{'md5'} or C{'sha'}.
@rtype: C{str}
"""
return self.original.digest(method)
def _inspect(self):
return '\n'.join(['Certificate For Subject:',
self.getSubject().inspect(),
'\nIssuer:',
self.getIssuer().inspect(),
'\nSerial Number: %d' % self.serialNumber(),
'Digest: %s' % nativeString(self.digest())])
def inspect(self):
"""
Return a multi-line, human-readable representation of this
Certificate, including information about the subject, issuer, and
public key.
"""
return '\n'.join((self._inspect(), self.getPublicKey().inspect()))
def getIssuer(self):
"""
Retrieve the issuer of this certificate.
@rtype: L{DistinguishedName}
@return: A copy of the issuer of this certificate.
"""
return self._copyName('issuer')
def options(self, *authorities):
raise NotImplementedError('Possible, but doubtful we need this yet')
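# Editor's illustrative sketch (not part of Twisted): loading a PEM-encoded
# certificate from disk and inspecting it; the file path below is an assumed
# example and the certificate is expected to carry a commonName.
def _exampleLoadCertificate():
    with open('/etc/ssl/certs/example.pem', 'rb') as f:  # hypothetical path
        cert = Certificate.loadPEM(f.read())
    return cert.getSubject().commonName, cert.digest()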
class CertificateRequest(CertBase):
"""
An x509 certificate request.
Certificate requests are given to certificate authorities to be signed and
returned resulting in an actual certificate.
"""
def load(Class, requestData, requestFormat=crypto.FILETYPE_ASN1):
req = crypto.load_certificate_request(requestFormat, requestData)
dn = DistinguishedName()
dn._copyFrom(req.get_subject())
if not req.verify(req.get_pubkey()):
raise VerifyError("Can't verify that request for %r is self-signed." % (dn,))
return Class(req)
load = classmethod(load)
def dump(self, format=crypto.FILETYPE_ASN1):
return crypto.dump_certificate_request(format, self.original)
class PrivateCertificate(Certificate):
"""
An x509 certificate and private key.
"""
def __repr__(self):
return Certificate.__repr__(self) + ' with ' + repr(self.privateKey)
def _setPrivateKey(self, privateKey):
if not privateKey.matches(self.getPublicKey()):
raise VerifyError(
"Certificate public and private keys do not match.")
self.privateKey = privateKey
return self
def newCertificate(self, newCertData, format=crypto.FILETYPE_ASN1):
"""
Create a new L{PrivateCertificate} from the given certificate data and
this instance's private key.
"""
return self.load(newCertData, self.privateKey, format)
def load(Class, data, privateKey, format=crypto.FILETYPE_ASN1):
return Class._load(data, format)._setPrivateKey(privateKey)
load = classmethod(load)
def inspect(self):
return '\n'.join([Certificate._inspect(self),
self.privateKey.inspect()])
def dumpPEM(self):
"""
Dump both public and private parts of a private certificate to
PEM-format data.
"""
return self.dump(crypto.FILETYPE_PEM) + self.privateKey.dump(crypto.FILETYPE_PEM)
def loadPEM(Class, data):
"""
Load both private and public parts of a private certificate from a
chunk of PEM-format data.
"""
return Class.load(data, KeyPair.load(data, crypto.FILETYPE_PEM),
crypto.FILETYPE_PEM)
loadPEM = classmethod(loadPEM)
def fromCertificateAndKeyPair(Class, certificateInstance, privateKey):
privcert = Class(certificateInstance.original)
return privcert._setPrivateKey(privateKey)
fromCertificateAndKeyPair = classmethod(fromCertificateAndKeyPair)
def options(self, *authorities):
"""
Create a context factory using this L{PrivateCertificate}'s certificate
and private key.
@param authorities: A list of L{Certificate} object
@return: A context factory.
@rtype: L{CertificateOptions <twisted.internet.ssl.CertificateOptions>}
"""
options = dict(privateKey=self.privateKey.original,
certificate=self.original)
if authorities:
options.update(dict(trustRoot=OpenSSLCertificateAuthorities(
[auth.original for auth in authorities]
)))
return OpenSSLCertificateOptions(**options)
def certificateRequest(self, format=crypto.FILETYPE_ASN1,
digestAlgorithm='md5'):
return self.privateKey.certificateRequest(
self.getSubject(),
format,
digestAlgorithm)
def signCertificateRequest(self,
requestData,
verifyDNCallback,
serialNumber,
requestFormat=crypto.FILETYPE_ASN1,
certificateFormat=crypto.FILETYPE_ASN1):
issuer = self.getSubject()
return self.privateKey.signCertificateRequest(
issuer,
requestData,
verifyDNCallback,
serialNumber,
requestFormat,
certificateFormat)
def signRequestObject(self, certificateRequest, serialNumber,
secondsToExpiry=60 * 60 * 24 * 365, # One year
digestAlgorithm='md5'):
return self.privateKey.signRequestObject(self.getSubject(),
certificateRequest,
serialNumber,
secondsToExpiry,
digestAlgorithm)
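# Editor's illustrative sketch (not part of Twisted): a server usually loads
# its certificate and private key from a single PEM file and turns them into a
# context factory with options(); the file name below is an assumed example.
def _exampleServerOptions():
    with open('server.pem', 'rb') as f:  # hypothetical cert + key PEM
        certificate = PrivateCertificate.loadPEM(f.read())
    # The returned CertificateOptions can be passed to e.g.
    # reactor.listenSSL(443, factory, contextFactory).
    return certificate.options()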
class PublicKey:
def __init__(self, osslpkey):
self.original = osslpkey
req1 = crypto.X509Req()
req1.set_pubkey(osslpkey)
self._emptyReq = crypto.dump_certificate_request(crypto.FILETYPE_ASN1, req1)
def matches(self, otherKey):
return self._emptyReq == otherKey._emptyReq
# XXX This could be a useful method, but sometimes it triggers a segfault,
# so we'll steer clear for now.
# def verifyCertificate(self, certificate):
# """
# returns None, or raises a VerifyError exception if the certificate
# could not be verified.
# """
# if not certificate.original.verify(self.original):
# raise VerifyError("We didn't sign that certificate.")
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__, self.keyHash())
def keyHash(self):
"""
MD5 hex digest of signature on an empty certificate request with this
key.
"""
return md5(self._emptyReq).hexdigest()
def inspect(self):
return 'Public Key with Hash: %s' % (self.keyHash(),)
class KeyPair(PublicKey):
def load(Class, data, format=crypto.FILETYPE_ASN1):
return Class(crypto.load_privatekey(format, data))
load = classmethod(load)
def dump(self, format=crypto.FILETYPE_ASN1):
return crypto.dump_privatekey(format, self.original)
def __getstate__(self):
return self.dump()
def __setstate__(self, state):
self.__init__(crypto.load_privatekey(crypto.FILETYPE_ASN1, state))
def inspect(self):
t = self.original.type()
if t == crypto.TYPE_RSA:
ts = 'RSA'
elif t == crypto.TYPE_DSA:
ts = 'DSA'
else:
ts = '(Unknown Type!)'
L = (self.original.bits(), ts, self.keyHash())
return '%s-bit %s Key Pair with Hash: %s' % L
def generate(Class, kind=crypto.TYPE_RSA, size=1024):
pkey = crypto.PKey()
pkey.generate_key(kind, size)
return Class(pkey)
def newCertificate(self, newCertData, format=crypto.FILETYPE_ASN1):
return PrivateCertificate.load(newCertData, self, format)
generate = classmethod(generate)
def requestObject(self, distinguishedName, digestAlgorithm='md5'):
req = crypto.X509Req()
req.set_pubkey(self.original)
distinguishedName._copyInto(req.get_subject())
req.sign(self.original, digestAlgorithm)
return CertificateRequest(req)
def certificateRequest(self, distinguishedName,
format=crypto.FILETYPE_ASN1,
digestAlgorithm='md5'):
"""Create a certificate request signed with this key.
@return: a string, formatted according to the 'format' argument.
"""
return self.requestObject(distinguishedName, digestAlgorithm).dump(format)
def signCertificateRequest(self,
issuerDistinguishedName,
requestData,
verifyDNCallback,
serialNumber,
requestFormat=crypto.FILETYPE_ASN1,
certificateFormat=crypto.FILETYPE_ASN1,
secondsToExpiry=60 * 60 * 24 * 365, # One year
digestAlgorithm='md5'):
"""
Given a blob of certificate request data and a certificate authority's
DistinguishedName, return a blob of signed certificate data.
If verifyDNCallback returns a Deferred, I will return a Deferred which
fires the data when that Deferred has completed.
"""
hlreq = CertificateRequest.load(requestData, requestFormat)
dn = hlreq.getSubject()
vval = verifyDNCallback(dn)
def verified(value):
if not value:
raise VerifyError("DN callback %r rejected request DN %r" % (verifyDNCallback, dn))
return self.signRequestObject(issuerDistinguishedName, hlreq,
serialNumber, secondsToExpiry, digestAlgorithm).dump(certificateFormat)
if isinstance(vval, Deferred):
return vval.addCallback(verified)
else:
return verified(vval)
def signRequestObject(self,
issuerDistinguishedName,
requestObject,
serialNumber,
secondsToExpiry=60 * 60 * 24 * 365, # One year
digestAlgorithm='md5'):
"""
Sign a CertificateRequest instance, returning a Certificate instance.
"""
req = requestObject.original
cert = crypto.X509()
issuerDistinguishedName._copyInto(cert.get_issuer())
cert.set_subject(req.get_subject())
cert.set_pubkey(req.get_pubkey())
cert.gmtime_adj_notBefore(0)
cert.gmtime_adj_notAfter(secondsToExpiry)
cert.set_serial_number(serialNumber)
cert.sign(self.original, digestAlgorithm)
return Certificate(cert)
def selfSignedCert(self, serialNumber, **kw):
dn = DN(**kw)
return PrivateCertificate.fromCertificateAndKeyPair(
self.signRequestObject(dn, self.requestObject(dn), serialNumber),
self)
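# Editor's illustrative sketch (not part of Twisted): generating a fresh RSA
# key pair and a self-signed certificate for it. The distinguished-name values
# and the serial number are assumed examples.
def _exampleSelfSignedCertificate():
    keyPair = KeyPair.generate(crypto.TYPE_RSA, 2048)
    return keyPair.selfSignedCert(1, CN=u'localhost', O=u'Example Org')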
class IOpenSSLTrustRoot(Interface):
"""
Trust settings for an OpenSSL context.
Note that this interface's methods are private, so things outside of
Twisted shouldn't implement it.
"""
def _addCACertsToContext(context):
"""
Add certificate-authority certificates to an SSL context whose
connections should trust those authorities.
@param context: An SSL context for a connection which should be
verified by some certificate authority.
@type context: L{OpenSSL.SSL.Context}
@return: L{None}
"""
@implementer(IOpenSSLTrustRoot)
class OpenSSLCertificateAuthorities(object):
"""
Trust an explicitly specified set of certificates, represented by a list of
L{OpenSSL.crypto.X509} objects.
"""
def __init__(self, caCerts):
"""
@param caCerts: The certificate authorities to trust when using this
object as a C{trustRoot} for L{OpenSSLCertificateOptions}.
@type caCerts: L{list} of L{OpenSSL.crypto.X509}
"""
self._caCerts = caCerts
def _addCACertsToContext(self, context):
store = context.get_cert_store()
for cert in self._caCerts:
store.add_cert(cert)
@implementer(IOpenSSLTrustRoot)
class OpenSSLDefaultPaths(object):
"""
Trust the set of default verify paths that OpenSSL was built with, as
specified by U{SSL_CTX_set_default_verify_paths
<https://www.openssl.org/docs/ssl/SSL_CTX_load_verify_locations.html>}.
"""
def _addCACertsToContext(self, context):
context.set_default_verify_paths()
def platformTrust():
"""
Attempt to discover a set of trusted certificate authority certificates
(or, in other words: trust roots, or root certificates) whose trust is
managed and updated by tools outside of Twisted.
If you are writing any client-side TLS code with Twisted, you should use
this as the C{trustRoot} argument to L{CertificateOptions
<twisted.internet.ssl.CertificateOptions>}.
The result of this function should be like the up-to-date list of
certificates in a web browser. When developing code that uses
C{platformTrust}, you can think of it that way. However, the choice of
which certificate authorities to trust is never Twisted's responsibility.
Unless you're writing a very unusual application or library, it's not your
code's responsibility either. The user may use platform-specific tools for
defining which server certificates should be trusted by programs using TLS.
The purpose of using this API is to respect that decision as much as
possible.
This should be a set of trust settings most appropriate for I{client} TLS
connections; i.e. those which need to verify a server's authenticity. You
should probably use this by default for any client TLS connection that you
create. For servers, however, client certificates are typically not
verified; or, if they are, their verification will depend on a custom,
application-specific certificate authority.
@since: 14.0
@note: Currently, L{platformTrust} depends entirely upon your OpenSSL build
supporting a set of "L{default verify paths <OpenSSLDefaultPaths>}"
which correspond to certificate authority trust roots. Unfortunately,
whether this is true of your system is both outside of Twisted's
control and difficult (if not impossible) for Twisted to detect
automatically.
Nevertheless, this ought to work as desired by default on:
- Ubuntu Linux machines with the U{ca-certificates
<https://launchpad.net/ubuntu/+source/ca-certificates>} package
installed,
- Mac OS X when using the system-installed version of OpenSSL (i.e.
I{not} one installed via MacPorts or Homebrew),
- any build of OpenSSL which has had certificate authority
certificates installed into its default verify paths (by default,
C{/usr/local/ssl/certs} if you've built your own OpenSSL), or
- any process where the C{SSL_CERT_FILE} environment variable is
set to the path of a file containing your desired CA certificates
bundle.
Hopefully soon, this API will be updated to use more sophisticated
trust-root discovery mechanisms. Until then, you can follow tickets in
the Twisted tracker for progress on this implementation on U{Microsoft
Windows <https://twistedmatrix.com/trac/ticket/6371>}, U{Mac OS X
<https://twistedmatrix.com/trac/ticket/6372>}, and U{a fallback for
other platforms which do not have native trust management tools
<https://twistedmatrix.com/trac/ticket/6934>}.
@return: an appropriate trust settings object for your platform.
@rtype: L{IOpenSSLTrustRoot}
@raise NotImplementedError: if this platform is not yet supported by
Twisted. At present, only OpenSSL is supported.
"""
return OpenSSLDefaultPaths()
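# Editor's illustrative sketch (not part of Twisted): platformTrust() is what
# optionsForClientTLS() (defined below) uses by default, but it can also be
# passed explicitly; the host name is an assumed example.
def _examplePlatformTrust():
    return optionsForClientTLS(u"example.com", trustRoot=platformTrust())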
def _tolerateErrors(wrapped):
"""
Wrap up an C{info_callback} for pyOpenSSL so that if something goes wrong
the error is immediately logged and the connection is dropped if possible.
This wrapper exists because some versions of pyOpenSSL don't handle errors
from callbacks at I{all}, and those which do write tracebacks directly to
stderr rather than to a supplied logging system. This reports unexpected
errors to the Twisted logging system.
Also, this terminates the connection immediately if possible because if
you've got bugs in your verification logic it's much safer to just give up.
@param wrapped: A valid C{info_callback} for pyOpenSSL.
@type wrapped: L{callable}
@return: A valid C{info_callback} for pyOpenSSL that handles any errors in
C{wrapped}.
@rtype: L{callable}
"""
def infoCallback(connection, where, ret):
try:
return wrapped(connection, where, ret)
except:
f = Failure()
log.err(f, "Error during info_callback")
connection.get_app_data().failVerification(f)
return infoCallback
@implementer(IOpenSSLClientConnectionCreator)
class ClientTLSOptions(object):
"""
Client creator for TLS.
Private implementation type (not exposed to applications) for public
L{optionsForClientTLS} API.
@ivar _ctx: The context to use for new connections.
@type _ctx: L{SSL.Context}
@ivar _hostname: The hostname to verify, as specified by the application,
as some human-readable text.
@type _hostname: L{unicode}
@ivar _hostnameBytes: The hostname to verify, decoded into IDNA-encoded
bytes. This is passed to APIs which think that hostnames are bytes,
such as OpenSSL's SNI implementation.
@type _hostnameBytes: L{bytes}
@ivar _hostnameASCII: The hostname, as transcoded into IDNA ASCII-range
unicode code points. This is pre-transcoded because the
C{service_identity} package is rather strict about requiring the
C{idna} package from PyPI for internationalized domain names, rather
than working with Python's built-in (but sometimes broken) IDNA
encoding. ASCII values, however, will always work.
@type _hostnameASCII: L{unicode}
"""
def __init__(self, hostname, ctx):
"""
Initialize L{ClientTLSOptions}.
@param hostname: The hostname to verify as input by a human.
@type hostname: L{unicode}
@param ctx: an L{SSL.Context} to use for new connections.
@type ctx: L{SSL.Context}.
"""
self._ctx = ctx
self._hostname = hostname
self._hostnameBytes = _idnaBytes(hostname)
self._hostnameASCII = self._hostnameBytes.decode("ascii")
ctx.set_info_callback(
_tolerateErrors(self._identityVerifyingInfoCallback)
)
def clientConnectionForTLS(self, tlsProtocol):
"""
Create a TLS connection for a client.
@note: This will call C{set_app_data} on its connection. If you're
delegating to this implementation of this method, don't ever call
C{set_app_data} or C{set_info_callback} on the returned connection,
or you'll break the implementation of various features of this
class.
@param tlsProtocol: the TLS protocol initiating the connection.
@type tlsProtocol: L{twisted.protocols.tls.TLSMemoryBIOProtocol}
@return: the configured client connection.
@rtype: L{OpenSSL.SSL.Connection}
"""
context = self._ctx
connection = SSL.Connection(context, None)
connection.set_app_data(tlsProtocol)
return connection
def _identityVerifyingInfoCallback(self, connection, where, ret):
"""
U{info_callback
<http://pythonhosted.org/pyOpenSSL/api/ssl.html#OpenSSL.SSL.Context.set_info_callback>
} for pyOpenSSL that verifies the hostname in the presented certificate
matches the one passed to this L{ClientTLSOptions}.
@param connection: the connection which is handshaking.
@type connection: L{OpenSSL.SSL.Connection}
@param where: flags indicating progress through a TLS handshake.
@type where: L{int}
@param ret: ignored
@type ret: ignored
"""
if where & SSL_CB_HANDSHAKE_START:
_maybeSetHostNameIndication(connection, self._hostnameBytes)
elif where & SSL_CB_HANDSHAKE_DONE:
try:
verifyHostname(connection, self._hostnameASCII)
except VerificationError:
f = Failure()
transport = connection.get_app_data()
transport.failVerification(f)
def optionsForClientTLS(hostname, trustRoot=None, clientCertificate=None,
**kw):
"""
Create a L{client connection creator <IOpenSSLClientConnectionCreator>} for
use with APIs such as L{SSL4ClientEndpoint
<twisted.internet.endpoints.SSL4ClientEndpoint>}, L{connectSSL
<twisted.internet.interfaces.IReactorSSL.connectSSL>}, and L{startTLS
<twisted.internet.interfaces.ITLSTransport.startTLS>}.
@since: 14.0
@param hostname: The expected name of the remote host. This serves two
purposes: first, and most importantly, it verifies that the certificate
received from the server correctly identifies the specified hostname.
The second purpose is (if the local C{pyOpenSSL} supports it) to use
the U{Server Name Indication extension
<https://en.wikipedia.org/wiki/Server_Name_Indication>} to indicate to
the server which certificate should be used.
@type hostname: L{unicode}
@param trustRoot: Specification of trust requirements of peers. This may
be a L{Certificate} or the result of L{platformTrust}. By default it
is L{platformTrust} and you probably shouldn't adjust it unless you
really know what you're doing. Be aware that clients using this
interface I{must} verify the server; you cannot explicitly pass C{None}
since that just means to use L{platformTrust}.
@type trustRoot: L{IOpenSSLTrustRoot}
@param clientCertificate: The certificate and private key that the client
will use to authenticate to the server. If unspecified, the client
will not authenticate.
@type clientCertificate: L{PrivateCertificate}
@param extraCertificateOptions: keyword-only argument; this is a dictionary
of additional keyword arguments to be presented to
L{CertificateOptions}. Please avoid using this unless you absolutely
        need to; any time you need to pass an option here, that is a bug in this
interface.
@type extraCertificateOptions: L{dict}
@param kw: (Backwards compatibility hack to allow keyword-only arguments on
Python 2. Please ignore; arbitrary keyword arguments will be errors.)
@type kw: L{dict}
@return: A client connection creator.
@rtype: L{IOpenSSLClientConnectionCreator}
"""
extraCertificateOptions = kw.pop('extraCertificateOptions', None) or {}
if trustRoot is None:
trustRoot = platformTrust()
if kw:
raise TypeError(
"optionsForClientTLS() got an unexpected keyword argument"
" '{arg}'".format(
arg=kw.popitem()[0]
)
)
if not isinstance(hostname, unicode):
raise TypeError(
"optionsForClientTLS requires text for host names, not "
+ hostname.__class__.__name__
)
if clientCertificate:
extraCertificateOptions.update(
privateKey=clientCertificate.privateKey.original,
certificate=clientCertificate.original
)
certificateOptions = OpenSSLCertificateOptions(
trustRoot=trustRoot,
**extraCertificateOptions
)
return ClientTLSOptions(hostname, certificateOptions.getContext())
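# Editor's illustrative sketch (not part of Twisted): the connection creator
# returned by optionsForClientTLS() plugs directly into an SSL client endpoint.
# The reactor, factory, host and port below are assumed examples.
def _exampleClientEndpoint(reactor, factory):
    from twisted.internet.endpoints import SSL4ClientEndpoint
    contextFactory = optionsForClientTLS(u"example.com")
    endpoint = SSL4ClientEndpoint(reactor, "example.com", 443, contextFactory)
    return endpoint.connect(factory)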
class OpenSSLCertificateOptions(object):
"""
A L{CertificateOptions <twisted.internet.ssl.CertificateOptions>} specifies
the security properties for a client or server TLS connection used with
OpenSSL.
@ivar _options: Any option flags to set on the L{OpenSSL.SSL.Context}
object that will be created.
@type _options: L{int}
@ivar _cipherString: An OpenSSL-specific cipher string.
@type _cipherString: L{unicode}
"""
# Factory for creating contexts. Configurable for testability.
_contextFactory = SSL.Context
_context = None
# Some option constants may not be exposed by PyOpenSSL yet.
_OP_ALL = getattr(SSL, 'OP_ALL', 0x0000FFFF)
_OP_NO_TICKET = getattr(SSL, 'OP_NO_TICKET', 0x00004000)
_OP_NO_COMPRESSION = getattr(SSL, 'OP_NO_COMPRESSION', 0x00020000)
    _OP_CIPHER_SERVER_PREFERENCE = getattr(SSL, 'OP_CIPHER_SERVER_PREFERENCE',
                                           0x00400000)
    _OP_SINGLE_ECDH_USE = getattr(SSL, 'OP_SINGLE_ECDH_USE', 0x00080000)
@_mutuallyExclusiveArguments([
['trustRoot', 'requireCertificate'],
['trustRoot', 'verify'],
['trustRoot', 'caCerts'],
])
def __init__(self,
privateKey=None,
certificate=None,
method=None,
verify=False,
caCerts=None,
verifyDepth=9,
requireCertificate=True,
verifyOnce=True,
enableSingleUseKeys=True,
enableSessions=True,
fixBrokenPeers=False,
enableSessionTickets=False,
extraCertChain=None,
acceptableCiphers=None,
dhParameters=None,
trustRoot=None):
"""
Create an OpenSSL context SSL connection context factory.
@param privateKey: A PKey object holding the private key.
@param certificate: An X509 object holding the certificate.
@param method: The SSL protocol to use, one of SSLv23_METHOD,
SSLv2_METHOD, SSLv3_METHOD, TLSv1_METHOD (or any other method
constants provided by pyOpenSSL). By default, a setting will be
used which allows TLSv1.0, TLSv1.1, and TLSv1.2.
@param verify: Please use a C{trustRoot} keyword argument instead,
since it provides the same functionality in a less error-prone way.
By default this is L{False}.
If L{True}, verify certificates received from the peer and fail the
handshake if verification fails. Otherwise, allow anonymous
sessions and sessions with certificates which fail validation.
@param caCerts: Please use a C{trustRoot} keyword argument instead,
since it provides the same functionality in a less error-prone way.
List of certificate authority certificate objects to use to verify
the peer's certificate. Only used if verify is L{True} and will be
ignored otherwise. Since verify is L{False} by default, this is
C{None} by default.
@type caCerts: C{list} of L{OpenSSL.crypto.X509}
@param verifyDepth: Depth in certificate chain down to which to verify.
If unspecified, use the underlying default (9).
@param requireCertificate: Please use a C{trustRoot} keyword argument
instead, since it provides the same functionality in a less
error-prone way.
If L{True}, do not allow anonymous sessions; defaults to L{True}.
@param verifyOnce: If True, do not re-verify the certificate on session
resumption.
@param enableSingleUseKeys: If L{True}, generate a new key whenever
ephemeral DH and ECDH parameters are used to prevent small subgroup
attacks and to ensure perfect forward secrecy.
@param enableSessions: If True, set a session ID on each context. This
allows a shortened handshake to be used when a known client
reconnects.
@param fixBrokenPeers: If True, enable various non-spec protocol fixes
for broken SSL implementations. This should be entirely safe,
according to the OpenSSL documentation, but YMMV. This option is
now off by default, because it causes problems with connections
between peers using OpenSSL 0.9.8a.
@param enableSessionTickets: If L{True}, enable session ticket
extension for session resumption per RFC 5077. Note there is no
support for controlling session tickets. This option is off by
default, as some server implementations don't correctly process
incoming empty session ticket extensions in the hello.
@param extraCertChain: List of certificates that I{complete} your
verification chain if the certificate authority that signed your
C{certificate} isn't widely supported. Do I{not} add
C{certificate} to it.
@type extraCertChain: C{list} of L{OpenSSL.crypto.X509}
@param acceptableCiphers: Ciphers that are acceptable for connections.
Uses a secure default if left L{None}.
@type acceptableCiphers: L{IAcceptableCiphers}
@param dhParameters: Key generation parameters that are required for
Diffie-Hellman key exchange. If this argument is left L{None},
C{EDH} ciphers are I{disabled} regardless of C{acceptableCiphers}.
@type dhParameters: L{DiffieHellmanParameters
<twisted.internet.ssl.DiffieHellmanParameters>}
@param trustRoot: Specification of trust requirements of peers. If
this argument is specified, the peer is verified. It requires a
certificate, and that certificate must be signed by one of the
certificate authorities specified by this object.
Note that since this option specifies the same information as
C{caCerts}, C{verify}, and C{requireCertificate}, specifying any of
those options in combination with this one will raise a
L{TypeError}.
@type trustRoot: L{IOpenSSLTrustRoot}
@raise ValueError: when C{privateKey} or C{certificate} are set without
setting the respective other.
@raise ValueError: when C{verify} is L{True} but C{caCerts} doesn't
specify any CA certificates.
@raise ValueError: when C{extraCertChain} is passed without specifying
C{privateKey} or C{certificate}.
@raise ValueError: when C{acceptableCiphers} doesn't yield any usable
ciphers for the current platform.
@raise TypeError: if C{trustRoot} is passed in combination with
C{caCerts}, C{verify}, or C{requireCertificate}. Please prefer
C{trustRoot} in new code, as its semantics are less tricky.
"""
if (privateKey is None) != (certificate is None):
raise ValueError(
"Specify neither or both of privateKey and certificate")
self.privateKey = privateKey
self.certificate = certificate
# Set basic security options: disallow insecure SSLv2, disallow TLS
# compression to avoid CRIME attack, make the server choose the
# ciphers.
self._options = (
SSL.OP_NO_SSLv2 | self._OP_NO_COMPRESSION |
self._OP_CIPHER_SERVER_PREFERENCE
)
if method is None:
# If no method is specified set things up so that TLSv1.0 and newer
# will be supported.
self.method = SSL.SSLv23_METHOD
self._options |= SSL.OP_NO_SSLv3
else:
# Otherwise respect the application decision.
self.method = method
if verify and not caCerts:
raise ValueError("Specify client CA certificate information if and"
" only if enabling certificate verification")
self.verify = verify
if extraCertChain is not None and None in (privateKey, certificate):
raise ValueError("A private key and a certificate are required "
"when adding a supplemental certificate chain.")
if extraCertChain is not None:
self.extraCertChain = extraCertChain
else:
self.extraCertChain = []
self.caCerts = caCerts
self.verifyDepth = verifyDepth
self.requireCertificate = requireCertificate
self.verifyOnce = verifyOnce
self.enableSingleUseKeys = enableSingleUseKeys
if enableSingleUseKeys:
self._options |= SSL.OP_SINGLE_DH_USE | self._OP_SINGLE_ECDH_USE
self.enableSessions = enableSessions
self.fixBrokenPeers = fixBrokenPeers
if fixBrokenPeers:
self._options |= self._OP_ALL
self.enableSessionTickets = enableSessionTickets
if not enableSessionTickets:
self._options |= self._OP_NO_TICKET
self.dhParameters = dhParameters
try:
self._ecCurve = _OpenSSLECCurve(_defaultCurveName)
except NotImplementedError:
self._ecCurve = None
if acceptableCiphers is None:
acceptableCiphers = defaultCiphers
# This needs to run when method and _options are finalized.
self._cipherString = u':'.join(
c.fullName
for c in acceptableCiphers.selectCiphers(
_expandCipherString(u'ALL', self.method, self._options)
)
)
if self._cipherString == u'':
raise ValueError(
'Supplied IAcceptableCiphers yielded no usable ciphers '
'on this platform.'
)
if trustRoot is None:
if self.verify:
trustRoot = OpenSSLCertificateAuthorities(caCerts)
else:
self.verify = True
self.requireCertificate = True
trustRoot = IOpenSSLTrustRoot(trustRoot)
self.trustRoot = trustRoot
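# Construction sketch (comment only): a typical server-side setup, where
# `serverKey` and `serverCert` are assumed to be OpenSSL PKey/X509 objects
# loaded elsewhere, e.g. from a PEM file.
#
#     serverOptions = OpenSSLCertificateOptions(
#         privateKey=serverKey,
#         certificate=serverCert,
#         extraCertChain=[intermediateCert],
#     )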
def __getstate__(self):
d = self.__dict__.copy()
try:
del d['_context']
except KeyError:
pass
return d
def __setstate__(self, state):
self.__dict__ = state
def getContext(self):
"""
Return an L{OpenSSL.SSL.Context} object.
"""
if self._context is None:
self._context = self._makeContext()
return self._context
def _makeContext(self):
ctx = self._contextFactory(self.method)
ctx.set_options(self._options)
if self.certificate is not None and self.privateKey is not None:
ctx.use_certificate(self.certificate)
ctx.use_privatekey(self.privateKey)
for extraCert in self.extraCertChain:
ctx.add_extra_chain_cert(extraCert)
# Sanity check
ctx.check_privatekey()
verifyFlags = SSL.VERIFY_NONE
if self.verify:
verifyFlags = SSL.VERIFY_PEER
if self.requireCertificate:
verifyFlags |= SSL.VERIFY_FAIL_IF_NO_PEER_CERT
if self.verifyOnce:
verifyFlags |= SSL.VERIFY_CLIENT_ONCE
self.trustRoot._addCACertsToContext(ctx)
# It'd be nice if pyOpenSSL let us pass None here for this behavior (as
# the underlying OpenSSL API call allows NULL to be passed). It
# doesn't, so we'll supply a function which does the same thing.
def _verifyCallback(conn, cert, errno, depth, preverify_ok):
return preverify_ok
ctx.set_verify(verifyFlags, _verifyCallback)
if self.verifyDepth is not None:
ctx.set_verify_depth(self.verifyDepth)
if self.enableSessions:
name = "%s-%d" % (reflect.qual(self.__class__), _sessionCounter())
sessionName = md5(networkString(name)).hexdigest()
ctx.set_session_id(sessionName)
if self.dhParameters:
ctx.load_tmp_dh(self.dhParameters._dhFile.path)
ctx.set_cipher_list(nativeString(self._cipherString))
if self._ecCurve is not None:
try:
self._ecCurve.addECKeyToContext(ctx)
except BaseException:
pass # ECDHE support is best effort only.
return ctx
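# Sketch (comment only; the factory name is an assumption): the options
# object acts as a context factory, so it can be passed straight to
# listenSSL or an SSL server endpoint.
#
#     reactor.listenSSL(8443, someProtocolFactory, serverOptions)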
class _OpenSSLECCurve(FancyEqMixin, object):
"""
A private representation of an OpenSSL ECC curve.
"""
compareAttributes = ("snName", )
def __init__(self, snName):
"""
@param snName: The name of the curve as used by C{OBJ_sn2nid}.
@type snName: L{unicode}
@raises NotImplementedError: If ECC support is not available.
@raises ValueError: If C{snName} is not a supported curve.
"""
self.snName = nativeString(snName)
# As soon as pyOpenSSL supports ECDHE directly, attempt to use its
# APIs first. See #7033.
# If pyOpenSSL is based on cryptography.io (0.14+), we use its
# bindings directly to set the ECDHE curve.
try:
binding = self._getBinding()
self._lib = binding.lib
self._ffi = binding.ffi
self._nid = self._lib.OBJ_sn2nid(self.snName.encode('ascii'))
if self._nid == self._lib.NID_undef:
raise ValueError("Unknown ECC curve.")
except AttributeError:
raise NotImplementedError(
"This version of pyOpenSSL does not support ECC."
)
def _getBinding(self):
"""
Attempt to get cryptography's binding instance.
@raises NotImplementedError: If underlying pyOpenSSL is not based on
cryptography.
@return: cryptography bindings.
@rtype: C{cryptography.hazmat.bindings.openssl.Binding}
"""
try:
from OpenSSL._util import binding
return binding
except ImportError:
raise NotImplementedError(
"This version of pyOpenSSL does not support ECC."
)
def addECKeyToContext(self, context):
"""
Add a temporary EC key to C{context}.
@param context: The context to add a key to.
@type context: L{OpenSSL.SSL.Context}
"""
ecKey = self._lib.EC_KEY_new_by_curve_name(self._nid)
if ecKey == self._ffi.NULL:
raise EnvironmentError("EC key creation failed.")
self._lib.SSL_CTX_set_tmp_ecdh(context._context, ecKey)
self._lib.EC_KEY_free(ecKey)
@implementer(ICipher)
class OpenSSLCipher(FancyEqMixin, object):
"""
A representation of an OpenSSL cipher.
"""
compareAttributes = ('fullName',)
def __init__(self, fullName):
"""
@param fullName: The full name of the cipher. For example
C{u"ECDHE-RSA-AES256-GCM-SHA384"}.
@type fullName: L{unicode}
"""
self.fullName = fullName
def __repr__(self):
"""
A runnable representation of the cipher.
"""
return 'OpenSSLCipher({0!r})'.format(self.fullName)
def _expandCipherString(cipherString, method, options):
"""
Expand C{cipherString} according to C{method} and C{options} to a list
of explicit ciphers that are supported by the current platform.
@param cipherString: An OpenSSL cipher string to expand.
@type cipherString: L{unicode}
@param method: An OpenSSL method like C{SSL.TLSv1_METHOD} used for
determining the effective ciphers.
@param options: OpenSSL options like C{SSL.OP_NO_SSLv3} ORed together.
@type options: L{int}
@return: The effective list of explicit ciphers that results from the
arguments on the current platform.
@rtype: L{list} of L{ICipher}
"""
ctx = SSL.Context(method)
ctx.set_options(options)
try:
ctx.set_cipher_list(nativeString(cipherString))
except SSL.Error as e:
if e.args[0][0][2] == 'no cipher match':
return []
else:
raise
conn = SSL.Connection(ctx, None)
ciphers = conn.get_cipher_list()
if isinstance(ciphers[0], unicode):
return [OpenSSLCipher(cipher) for cipher in ciphers]
else:
return [OpenSSLCipher(cipher.decode('ascii')) for cipher in ciphers]
@implementer(IAcceptableCiphers)
class OpenSSLAcceptableCiphers(object):
"""
A representation of ciphers that are acceptable for TLS connections.
"""
def __init__(self, ciphers):
self._ciphers = ciphers
def selectCiphers(self, availableCiphers):
return [cipher
for cipher in self._ciphers
if cipher in availableCiphers]
@classmethod
def fromOpenSSLCipherString(cls, cipherString):
"""
Create a new instance using an OpenSSL cipher string.
@param cipherString: An OpenSSL cipher string that describes what
cipher suites are acceptable.
See the documentation of U{OpenSSL
<http://www.openssl.org/docs/apps/ciphers.html#CIPHER_STRINGS>} or
U{Apache
<http://httpd.apache.org/docs/2.4/mod/mod_ssl.html#sslciphersuite>}
for details.
@type cipherString: L{unicode}
@return: Instance representing C{cipherString}.
@rtype: L{twisted.internet.ssl.AcceptableCiphers}
"""
return cls(_expandCipherString(
nativeString(cipherString),
SSL.SSLv23_METHOD, SSL.OP_NO_SSLv2 | SSL.OP_NO_SSLv3)
)
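# Example sketch (comment only; the cipher string is an illustrative
# assumption): restricting acceptable ciphers to ECDHE AES-GCM suites.
#
#     ciphers = OpenSSLAcceptableCiphers.fromOpenSSLCipherString(
#         u'ECDHE+AESGCM')
#     options = OpenSSLCertificateOptions(acceptableCiphers=ciphers)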
# A secure default.
# Sources for more information on TLS ciphers:
#
# - https://wiki.mozilla.org/Security/Server_Side_TLS
# - https://www.ssllabs.com/projects/best-practices/index.html
# - https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/
#
# The general intent is:
# - Prefer cipher suites that offer perfect forward secrecy (DHE/ECDHE),
# - prefer ECDHE over DHE for better performance,
# - prefer any AES-GCM over any AES-CBC for better performance and security,
# - use 3DES as fallback which is secure but slow,
# - disable NULL authentication, MD5 MACs and DSS for security reasons.
#
defaultCiphers = OpenSSLAcceptableCiphers.fromOpenSSLCipherString(
"ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:"
"DH+AES:ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+3DES:!aNULL:!MD5:!DSS"
)
_defaultCurveName = u"prime256v1"
class OpenSSLDiffieHellmanParameters(object):
"""
A representation of key generation parameters that are required for
Diffie-Hellman key exchange.
"""
def __init__(self, parameters):
self._dhFile = parameters
@classmethod
def fromFile(cls, filePath):
"""
Load parameters from a file.
Such a file can be generated using the C{openssl} command line tool as
follows:
C{openssl dhparam -out dh_param_1024.pem -2 1024}
Please refer to U{OpenSSL's C{dhparam} documentation
<http://www.openssl.org/docs/apps/dhparam.html>} for further details.
@param filePath: A file containing parameters for Diffie-Hellman key
exchange.
@type filePath: L{FilePath <twisted.python.filepath.FilePath>}
@return: An instance that loads its parameters from C{filePath}.
@rtype: L{DiffieHellmanParameters
<twisted.internet.ssl.DiffieHellmanParameters>}
"""
return cls(filePath)
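# Example sketch (comment only; the file name is an assumption, generated
# with the openssl command shown above):
#
#     from twisted.python.filepath import FilePath
#     dhParams = OpenSSLDiffieHellmanParameters.fromFile(
#         FilePath('dh_param_1024.pem'))
#     options = OpenSSLCertificateOptions(dhParameters=dhParams)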
| bsd-3-clause | -3,236,313,116,830,708,700 | 6,491,997,117,830,422,000 | 33.319812 | 113 | 0.634057 | false |
nathanaevitas/odoo | openerp/addons/event/wizard/__init__.py | 435 | 1067 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import event_confirm
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 203,739,819,328,543,680 | -3,845,633,282,831,854,600 | 43.458333 | 78 | 0.615745 | false |
govarguz/espressopp | src/main/_setup.py | 9 | 4037 | # Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# now load the fundamental modules
# load mpi4py (must be loaded before _espressopp)
import mpi4py.MPI as MPI
# load the ES++-C++ module
import _espressopp
# load PMI explicitly from espressopp
from espressopp import pmi
# define pmiimport
if pmi.isController :
def pmiimport(module):
pmi.exec_('import ' + module)
else:
def pmiimport(module):
pass
# set up logging
def _setupLogging():
import logging, os, math
logConfigFile="espressopp_log.conf"
if os.path.exists(logConfigFile) :
import logging.config
logging.config.fileConfig(logConfigFile)
log = logging.getLogger('root')
log.info('Reading log config file %s', logConfigFile)
else :
logging.basicConfig(
format = "%(process)d %(asctime)s %(name)s (%(filename)s::%(lineno)s,%(funcName)s) %(levelname)s: %(message)s")
log = logging.getLogger('root')
log.info('Did not find log config file %s, using basic configuration.', logConfigFile)
# This initialization routine will change existing and future loggers
# to make a connection with their Python logger and change their class
def __my_setLevel(self, level):
__orig_setLevel(self, level)
_espressopp.setLogger(self)
__orig_setLevel = logging.Logger.setLevel
logging.Logger.setLevel = __my_setLevel
logging.TRACE = int((logging.NOTSET + logging.DEBUG)/2.0)
logging.addLevelName('TRACE', logging.TRACE)
_espressopp.setLogger()
# execute the function
_setupLogging()
def _setupProperty():
import __builtin__
# Make the property setter decorator syntax of python 2.6+ available
# to earlier versions
try :
__setter = __builtin__.property.setter
except AttributeError :
import __builtin__, sys
# save the property builtin
_property = __builtin__.property
# now define our property
# stolen from http://bruynooghe.blogspot.com/2008/04/xsetter-syntax-in-python-25.html
class property(_property):
def __init__(self, fget, *args, **kwargs):
self.__doc__ = fget.__doc__
super(property, self).__init__(fget, *args, **kwargs)
def setter(self, fset):
cls_ns = sys._getframe(1).f_locals
for k, v in cls_ns.iteritems():
if v == self:
propname = k
break
cls_ns[propname] = _property(self.fget, fset,
self.fdel, self.__doc__)
return cls_ns[propname]
def deleter(self, fdel):
cls_ns = sys._getframe(1).f_locals
for k, v in cls_ns.iteritems():
if v == self:
propname = k
break
cls_ns[propname] = _property(self.fget, self.fset,
fdel, self.__doc__)
return cls_ns[propname]
# Now override the property builtin
__builtin__.property = property
_setupProperty()
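# With the backport above the Python 2.6+ setter syntax is available on all
# supported interpreters; a minimal sketch of what it enables:
#
#     class Example(object):
#         @property
#         def value(self):
#             return self._value
#         @value.setter
#         def value(self, v):
#             self._value = v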
| gpl-3.0 | -1,097,322,865,320,793,600 | -3,987,197,725,002,825,700 | 34.725664 | 122 | 0.604162 | false |
hosseinsadeghi/ultracold-ions | uci/AngularDampingAdvance.py | 2 | 3099 | # vi: ts=4 sw=4
import math
import numpy
import pyopencl.array as cl_array
import pyopencl as cl
import sys
import os
class AngularDampingAdvance():
def __init__(self, ctx = None, queue = None):
self.minRadius = 1.0e-5
self.ctx = ctx
self.queue = queue
if self.ctx == None:
self.ctx = cl.create_some_context()
if self.queue == None:
self.queue = cl.CommandQueue(self.ctx,
properties = cl.command_queue_properties.PROFILING_ENABLE)
absolutePathToKernels = os.path.dirname(
os.path.realpath(__file__))
src = open(absolutePathToKernels + '/angular_damping_advance.cl',
'r').read()
self.angularDampingAdvF = cl.Program(self.ctx, src)
try:
self.angularDampingAdvF.build()
except:
print("Error:")
print(self.angularDampingAdvF.get_build_info(
self.ctx.devices[0],
cl.program_build_info.LOG))
raise
self.angularDampingAdvF.advance_ptcls_angular_damping.set_scalar_arg_dtypes(
[None, None, None, None, None, None, None, None,
numpy.float32, numpy.float32, numpy.float32,
numpy.int32])
self.angularDampingAdvD = cl.Program(self.ctx, src)
try:
self.angularDampingAdvD.build()
except:
print("Error:")
print(self.angularDampingAdvD.get_build_info(
self.ctx.devices[0],
cl.program_build_info.LOG))
raise
self.angularDampingAdvD.advance_ptcls_angular_damping.set_scalar_arg_dtypes(
[None, None, None, None, None, None, None, None,
numpy.float64, numpy.float64, numpy.float64,
numpy.int32])
def advancePtcls(self, xd, yd, zd, vxd, vyd, vzd, qd, md,
dampingCoefficient, omega, dt):
"""
Dampen velocities in the x-y plane.
"""
prec = xd.dtype
if prec == numpy.float32:
# Use the single-precision program for float32 data.
self.angularDampingAdvF.advance_ptcls_angular_damping(self.queue,
(xd.size, ), None,
xd.data, yd.data, zd.data,
vxd.data, vyd.data, vzd.data,
qd.data, md.data,
numpy.float32(math.exp(-dampingCoefficient * dt)),
numpy.float32(omega),
numpy.float32(self.minRadius),
numpy.int32(xd.size),
g_times_l = False)
elif prec == numpy.float64:
self.angularDampingAdvD.advance_ptcls_angular_damping(self.queue,
(xd.size, ), None,
xd.data, yd.data, zd.data,
vxd.data, vyd.data, vzd.data,
qd.data, md.data,
numpy.float64(math.exp(-dampingCoefficient * dt)),
numpy.float64(omega),
numpy.float64(self.minRadius),
numpy.int32(xd.size),
g_times_l = False)
else:
print("Unknown float type.")
| mit | -1,727,359,358,883,695,900 | 959,287,889,500,439,200 | 36.337349 | 84 | 0.538238 | false |
chriscauley/django-registration | registration/admin.py | 1 | 1630 | from django.contrib import admin, messages
from django.contrib.sites.requests import RequestSite
from django.contrib.sites.models import Site
from django.utils.translation import ugettext_lazy as _
from registration.models import RegistrationProfile
class RawMixin(object):
# Note: VerboseForeignKeyRawIdWidget, VerboseManyToManyRawIdWidget and
# `site` are not defined in this module; they are assumed to be imported
# or defined elsewhere in the project.
def formfield_for_dbfield(self, db_field, **kwargs):
if db_field.name in self.raw_id_fields:
kwargs.pop("request", None)
type = db_field.rel.__class__.__name__
if type == "ManyToOneRel":
kwargs['widget'] = VerboseForeignKeyRawIdWidget(db_field.rel, site)
elif type == "ManyToManyRel":
kwargs['widget'] = VerboseManyToManyRawIdWidget(db_field.rel, site)
return db_field.formfield(**kwargs)
return super(RawMixin, self).formfield_for_dbfield(db_field, **kwargs)
class RegistrationAdmin(RawMixin,admin.ModelAdmin):
actions = ['activate_users', 'resend_activation_email']
list_display = ('user', 'expired')
raw_id_fields = ['user']
search_fields = ('user__username', 'user__first_name', 'user__last_name')
def activate_users(self, request, queryset):
for profile in queryset:
RegistrationProfile.objects.activate_user(profile.activation_key)
activate_users.short_description = _("Activate users")
def resend_activation_email(self, request, queryset):
if Site._meta.installed:
site = Site.objects.get_current()
else:
site = RequestSite(request)
for profile in queryset:
profile.send_activation_email(site)
resend_activation_email.short_description = _("Re-send activation emails")
admin.site.register(RegistrationProfile, RegistrationAdmin)
| bsd-3-clause | -8,015,761,858,845,963,000 | 6,103,402,365,530,941,000 | 37.809524 | 76 | 0.722086 | false |
joakim-hove/django | django/contrib/flatpages/views.py | 475 | 2777 | from django.conf import settings
from django.contrib.flatpages.models import FlatPage
from django.contrib.sites.shortcuts import get_current_site
from django.http import Http404, HttpResponse, HttpResponsePermanentRedirect
from django.shortcuts import get_object_or_404
from django.template import loader
from django.utils.safestring import mark_safe
from django.views.decorators.csrf import csrf_protect
DEFAULT_TEMPLATE = 'flatpages/default.html'
# This view is called from FlatpageFallbackMiddleware.process_response
# when a 404 is raised, which often means CsrfViewMiddleware.process_view
# has not been called even if CsrfViewMiddleware is installed. So we need
# to use @csrf_protect, in case the template needs {% csrf_token %}.
# However, we can't just wrap this view; if no matching flatpage exists,
# or a redirect is required for authentication, the 404 needs to be returned
# without any CSRF checks. Therefore, we only
# CSRF protect the internal implementation.
def flatpage(request, url):
"""
Public interface to the flat page view.
Models: `flatpages.flatpages`
Templates: Uses the template defined by the ``template_name`` field,
or :template:`flatpages/default.html` if template_name is not defined.
Context:
flatpage
`flatpages.flatpages` object
"""
if not url.startswith('/'):
url = '/' + url
site_id = get_current_site(request).id
try:
f = get_object_or_404(FlatPage,
url=url, sites=site_id)
except Http404:
if not url.endswith('/') and settings.APPEND_SLASH:
url += '/'
f = get_object_or_404(FlatPage,
url=url, sites=site_id)
return HttpResponsePermanentRedirect('%s/' % request.path)
else:
raise
return render_flatpage(request, f)
@csrf_protect
def render_flatpage(request, f):
"""
Internal interface to the flat page view.
"""
# If registration is required for accessing this page, and the user isn't
# logged in, redirect to the login page.
if f.registration_required and not request.user.is_authenticated():
from django.contrib.auth.views import redirect_to_login
return redirect_to_login(request.path)
if f.template_name:
template = loader.select_template((f.template_name, DEFAULT_TEMPLATE))
else:
template = loader.get_template(DEFAULT_TEMPLATE)
# To avoid having to always use the "|safe" filter in flatpage templates,
# mark the title and content as already safe (since they are raw HTML
# content in the first place).
f.title = mark_safe(f.title)
f.content = mark_safe(f.content)
response = HttpResponse(template.render({'flatpage': f}, request))
return response
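# Hook-up sketch (comment only; URL pattern and prefix are assumptions): the
# view is reached either through FlatpageFallbackMiddleware or through an
# explicit URL pattern such as:
#
#     from django.conf.urls import url
#     from django.contrib.flatpages import views
#     urlpatterns = [
#         url(r'^pages/(?P<url>.*)$', views.flatpage),
#     ]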
| bsd-3-clause | 4,201,783,663,695,209,000 | 6,543,240,923,470,098,000 | 37.569444 | 78 | 0.699316 | false |
SerialShadow/SickRage | lib/hachoir_core/field/basic_field_set.py | 74 | 4776 | from hachoir_core.field import Field, FieldError
from hachoir_core.stream import InputStream
from hachoir_core.endian import BIG_ENDIAN, LITTLE_ENDIAN, MIDDLE_ENDIAN
from hachoir_core.event_handler import EventHandler
class ParserError(FieldError):
"""
Error raised by a field set.
@see: L{FieldError}
"""
pass
class MatchError(FieldError):
"""
Error raised by a field set when the stream content doesn't
match the file format.
@see: L{FieldError}
"""
pass
class BasicFieldSet(Field):
_event_handler = None
is_field_set = True
endian = None
def __init__(self, parent, name, stream, description, size):
# Sanity checks (preconditions)
assert not parent or issubclass(parent.__class__, BasicFieldSet)
assert issubclass(stream.__class__, InputStream)
# Set field set size
if size is None and self.static_size:
assert isinstance(self.static_size, (int, long))
size = self.static_size
# Set Field attributes
self._parent = parent
self._name = name
self._size = size
self._description = description
self.stream = stream
self._field_array_count = {}
# Set endian
if not self.endian:
assert parent and parent.endian
self.endian = parent.endian
if parent:
# This field set is one of the root leafs
self._address = parent.nextFieldAddress()
self.root = parent.root
assert id(self.stream) == id(parent.stream)
else:
# This field set is the root
self._address = 0
self.root = self
self._global_event_handler = None
# Sanity checks (post-conditions)
assert self.endian in (BIG_ENDIAN, LITTLE_ENDIAN, MIDDLE_ENDIAN)
if (self._size is not None) and (self._size <= 0):
raise ParserError("Invalid parser '%s' size: %s" % (self.path, self._size))
def reset(self):
self._field_array_count = {}
def createValue(self):
return None
def connectEvent(self, event_name, handler, local=True):
assert event_name in (
# Callback prototype: def f(field)
# Called when new value is already set
"field-value-changed",
# Callback prototype: def f(field)
# Called when field size is already set
"field-resized",
# A new field has been inserted in the field set
# Callback prototype: def f(index, new_field)
"field-inserted",
# Callback prototype: def f(old_field, new_field)
# Called when new field is already in field set
"field-replaced",
# Callback prototype: def f(field, new_value)
# Called to ask to set new value
"set-field-value"
), "Event name %r is invalid" % event_name
if local:
if self._event_handler is None:
self._event_handler = EventHandler()
self._event_handler.connect(event_name, handler)
else:
if self.root._global_event_handler is None:
self.root._global_event_handler = EventHandler()
self.root._global_event_handler.connect(event_name, handler)
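# Example sketch (comment only; `field_set` and the callback are
# assumptions):
#
#     def on_value_changed(field):
#         print("field %s changed" % field.path)
#     field_set.connectEvent("field-value-changed", on_value_changed)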
def raiseEvent(self, event_name, *args):
# Transfer event to local listeners
if self._event_handler is not None:
self._event_handler.raiseEvent(event_name, *args)
# Transfer event to global listeners
if self.root._global_event_handler is not None:
self.root._global_event_handler.raiseEvent(event_name, *args)
def setUniqueFieldName(self, field):
key = field._name[:-2]
try:
self._field_array_count[key] += 1
except KeyError:
self._field_array_count[key] = 0
field._name = key + "[%u]" % self._field_array_count[key]
def readFirstFields(self, number):
"""
Read first number fields if they are not read yet.
Returns number of new added fields.
"""
number = number - self.current_length
if 0 < number:
return self.readMoreFields(number)
else:
return 0
def createFields(self):
raise NotImplementedError()
def __iter__(self):
raise NotImplementedError()
def __len__(self):
raise NotImplementedError()
def getField(self, key, const=True):
raise NotImplementedError()
def nextFieldAddress(self):
raise NotImplementedError()
def getFieldIndex(self, field):
raise NotImplementedError()
def readMoreFields(self, number):
raise NotImplementedError()
| gpl-3.0 | 5,695,227,724,252,073,000 | -6,621,875,872,446,512,000 | 31.489796 | 87 | 0.596106 | false |
X-dark/Flexget | flexget/plugins/output/utorrent.py | 7 | 3914 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, division, absolute_import
import os
from logging import getLogger
from flexget import plugin
from flexget.event import event
from flexget.utils import requests
from flexget.utils.soup import get_soup
from flexget.utils.template import RenderError
log = getLogger('utorrent')
class PluginUtorrent(object):
"""
Parse task content or url for hoster links and add them to utorrent.
Example::
utorrent:
url: http://localhost:8080/gui/
username: my_username
password: my_password
path: Series
"""
__author__ = 'Nil'
__version__ = '0.1'
schema = {
'type': 'object',
'properties': {
'url': {'type': 'string', 'format': 'url'},
'username': {'type': 'string'},
'password': {'type': 'string'},
'path': {'type': 'string'}
},
'required': ['username', 'password', 'url'],
'additionalProperties': False
}
@plugin.internet(log)
def on_task_output(self, task, config):
if not config.get('enabled', True):
return
if not task.accepted:
return
session = requests.Session()
url = config['url']
if not url.endswith('/'):
url += '/'
auth = (config['username'], config['password'])
# Login
try:
response = session.get(url + 'token.html', auth=auth)
except requests.RequestException as e:
if hasattr(e, 'response') and e.response.status_code == 401:
raise plugin.PluginError('Invalid credentials, check your utorrent webui username and password.', log)
raise plugin.PluginError('%s' % e, log)
token = get_soup(response.text).find('div', id='token').text
result = session.get(url, auth=auth, params={'action': 'list-dirs', 'token': token}).json()
download_dirs = dict((os.path.normcase(dir['path']), i) for i, dir in enumerate(result['download-dirs']))
for entry in task.accepted:
# http://[IP]:[PORT]/gui/?action=add-url&s=[TORRENT URL]
# bunch of urls now going to check
folder = 0
path = entry.get('path', config.get('path', ''))
try:
path = os.path.normcase(os.path.expanduser(entry.render(path)))
except RenderError as e:
log.error('Could not render path for `%s` downloading to default directory.' % entry['title'])
# Add to default folder
path = ''
if path:
for dir in download_dirs:
if path.startswith(dir):
folder = download_dirs[dir]
path = path[len(dir):].lstrip('\\')
break
else:
log.error('path `%s` (or one of its parents) is not added to utorrent webui allowed download '
'directories. You must add it there before you can use it from flexget. '
'Adding to default download directory instead.' % path)
path = ''
if task.options.test:
log.info('Would add `%s` to utorrent' % entry['title'])
continue
# Add torrent
data = {'action': 'add-url', 's': entry['url'], 'token': token, 'download_dir': folder, 'path': path}
result = session.get(url, params=data, auth=auth)
if 'build' in result.json():
log.info('Added `%s` to utorrent' % entry['url'])
log.info('in folder %s ' % folder + path)
else:
entry.fail('Fail to add `%s` to utorrent' % entry['url'])
@event('plugin.register')
def register_plugin():
plugin.register(PluginUtorrent, 'utorrent', api_ver=2)
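# Task sketch (YAML, comment only; feed URL and credentials are
# assumptions):
#
#     tasks:
#       grab-torrents:
#         rss: http://example.com/torrents.rss
#         accept_all: yes
#         utorrent:
#           url: http://localhost:8080/gui/
#           username: my_username
#           password: my_password
#           path: Series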
| mit | 6,782,001,150,796,989,000 | -9,098,382,567,156,271,000 | 35.240741 | 118 | 0.540112 | false |
saisrisathya/whatsapps | build/lib/yowsup/layers/protocol_profiles/layer.py | 31 | 2304 | from yowsup.layers import YowProtocolLayer
from .protocolentities import *
from yowsup.layers.protocol_iq.protocolentities import ErrorIqProtocolEntity, ResultIqProtocolEntity
class YowProfilesProtocolLayer(YowProtocolLayer):
def __init__(self):
handleMap = {
"iq": (self.recvIq, self.sendIq)
}
super(YowProfilesProtocolLayer, self).__init__(handleMap)
def __str__(self):
return "Profiles Layer"
def sendIq(self, entity):
if entity.getXmlns() == "w:profile:picture":
if entity.getType() == "get":
self._sendIq(entity, self.onGetPictureResult, self.onGetPictureError)
elif entity.getType() == "set":
self._sendIq(entity, self.onSetPictureResult, self.onSetPictureError)
elif entity.getType() == "delete":
self._sendIq(entity, self.onDeletePictureResult, self.onDeletePictureError)
elif entity.getXmlns() == "status":
self._sendIq(entity, self.onSetStatusResult, self.onSetStatusError)
def recvIq(self, node):
pass
def onSetStatusResult(self, resultNode, originIqRequestEntity):
self.toUpper(ResultIqProtocolEntity.fromProtocolTreeNode(resultNode))
def onSetStatusError(self, errorNode, originalIqRequestEntity):
self.toUpper(ErrorIqProtocolEntity.fromProtocolTreeNode(errorNode))
def onGetPictureResult(self, resultNode, originalIqRequestEntity):
self.toUpper(ResultGetPictureIqProtocolEntity.fromProtocolTreeNode(resultNode))
def onGetPictureError(self, errorNode, originalIqRequestEntity):
self.toUpper(ErrorIqProtocolEntity.fromProtocolTreeNode(errorNode))
def onSetPictureResult(self, resultNode, originalIqRequestEntity):
self.toUpper(ResultGetPictureIqProtocolEntity.fromProtocolTreeNode(resultNode))
def onSetPictureError(self, errorNode, originalIqRequestEntity):
self.toUpper(ErrorIqProtocolEntity.fromProtocolTreeNode(errorNode))
def onDeletePictureResult(self, resultNode, originalIqRequestEntity):
self.toUpper(ResultIqProtocolEntity.fromProtocolTreeNode(resultNode))
def onDeletePictureError(self, errorNode, originalIqRequestEntity):
self.toUpper(ErrorIqProtocolEntity.fromProtocolTreeNode(errorNode))
| gpl-3.0 | -2,348,182,697,796,619,300 | 4,119,206,268,022,131,700 | 44.176471 | 100 | 0.732205 | false |
mjtamlyn/django | tests/invalid_models_tests/test_relative_fields.py | 18 | 60736 | from django.core.checks import Error, Warning as DjangoWarning
from django.db import models
from django.db.models.fields.related import ForeignObject
from django.test.testcases import SimpleTestCase, skipIfDBFeature
from django.test.utils import isolate_apps, override_settings
@isolate_apps('invalid_models_tests')
class RelativeFieldTests(SimpleTestCase):
def test_valid_foreign_key_without_accessor(self):
class Target(models.Model):
# There would be a clash if Model.field installed an accessor.
model = models.IntegerField()
class Model(models.Model):
field = models.ForeignKey(Target, models.CASCADE, related_name='+')
field = Model._meta.get_field('field')
errors = field.check()
self.assertEqual(errors, [])
def test_foreign_key_to_missing_model(self):
# Model names are resolved when a model is being created, so we cannot
# test relative fields in isolation and we need to attach them to a
# model.
class Model(models.Model):
foreign_key = models.ForeignKey('Rel1', models.CASCADE)
field = Model._meta.get_field('foreign_key')
errors = field.check()
expected = [
Error(
"Field defines a relation with model 'Rel1', "
"which is either not installed, or is abstract.",
obj=field,
id='fields.E300',
),
]
self.assertEqual(errors, expected)
@isolate_apps('invalid_models_tests')
def test_foreign_key_to_isolate_apps_model(self):
"""
#25723 - Referenced model registration lookup should be run against the
field's model registry.
"""
class OtherModel(models.Model):
pass
class Model(models.Model):
foreign_key = models.ForeignKey('OtherModel', models.CASCADE)
field = Model._meta.get_field('foreign_key')
self.assertEqual(field.check(from_model=Model), [])
def test_many_to_many_to_missing_model(self):
class Model(models.Model):
m2m = models.ManyToManyField("Rel2")
field = Model._meta.get_field('m2m')
errors = field.check(from_model=Model)
expected = [
Error(
"Field defines a relation with model 'Rel2', "
"which is either not installed, or is abstract.",
obj=field,
id='fields.E300',
),
]
self.assertEqual(errors, expected)
@isolate_apps('invalid_models_tests')
def test_many_to_many_to_isolate_apps_model(self):
"""
#25723 - Referenced model registration lookup should be run against the
field's model registry.
"""
class OtherModel(models.Model):
pass
class Model(models.Model):
m2m = models.ManyToManyField('OtherModel')
field = Model._meta.get_field('m2m')
self.assertEqual(field.check(from_model=Model), [])
def test_many_to_many_with_limit_choices_auto_created_no_warning(self):
class Model(models.Model):
name = models.CharField(max_length=20)
class ModelM2M(models.Model):
m2m = models.ManyToManyField(Model, limit_choices_to={'name': 'test_name'})
self.assertEqual(ModelM2M.check(), [])
def test_many_to_many_with_useless_options(self):
class Model(models.Model):
name = models.CharField(max_length=20)
class ModelM2M(models.Model):
m2m = models.ManyToManyField(
Model,
null=True,
validators=[lambda x: x],
limit_choices_to={'name': 'test_name'},
through='ThroughModel',
through_fields=('modelm2m', 'model'),
)
class ThroughModel(models.Model):
model = models.ForeignKey('Model', models.CASCADE)
modelm2m = models.ForeignKey('ModelM2M', models.CASCADE)
errors = ModelM2M.check()
field = ModelM2M._meta.get_field('m2m')
expected = [
DjangoWarning(
'null has no effect on ManyToManyField.',
obj=field,
id='fields.W340',
),
DjangoWarning(
'ManyToManyField does not support validators.',
obj=field,
id='fields.W341',
),
DjangoWarning(
'limit_choices_to has no effect on ManyToManyField '
'with a through model.',
obj=field,
id='fields.W343',
),
]
self.assertEqual(errors, expected)
def test_ambiguous_relationship_model(self):
class Person(models.Model):
pass
class Group(models.Model):
field = models.ManyToManyField('Person', through="AmbiguousRelationship", related_name='tertiary')
class AmbiguousRelationship(models.Model):
# Too much foreign keys to Person.
first_person = models.ForeignKey(Person, models.CASCADE, related_name="first")
second_person = models.ForeignKey(Person, models.CASCADE, related_name="second")
second_model = models.ForeignKey(Group, models.CASCADE)
field = Group._meta.get_field('field')
errors = field.check(from_model=Group)
expected = [
Error(
"The model is used as an intermediate model by "
"'invalid_models_tests.Group.field', but it has more than one "
"foreign key to 'Person', which is ambiguous. You must specify "
"which foreign key Django should use via the through_fields "
"keyword argument.",
hint=(
'If you want to create a recursive relationship, use '
'ForeignKey("self", symmetrical=False, through="AmbiguousRelationship").'
),
obj=field,
id='fields.E335',
),
]
self.assertEqual(errors, expected)
def test_relationship_model_with_foreign_key_to_wrong_model(self):
class WrongModel(models.Model):
pass
class Person(models.Model):
pass
class Group(models.Model):
members = models.ManyToManyField('Person', through="InvalidRelationship")
class InvalidRelationship(models.Model):
person = models.ForeignKey(Person, models.CASCADE)
wrong_foreign_key = models.ForeignKey(WrongModel, models.CASCADE)
# The last foreign key should point to Group model.
field = Group._meta.get_field('members')
errors = field.check(from_model=Group)
expected = [
Error(
"The model is used as an intermediate model by "
"'invalid_models_tests.Group.members', but it does not "
"have a foreign key to 'Group' or 'Person'.",
obj=InvalidRelationship,
id='fields.E336',
),
]
self.assertEqual(errors, expected)
def test_relationship_model_missing_foreign_key(self):
class Person(models.Model):
pass
class Group(models.Model):
members = models.ManyToManyField('Person', through="InvalidRelationship")
class InvalidRelationship(models.Model):
group = models.ForeignKey(Group, models.CASCADE)
# No foreign key to Person
field = Group._meta.get_field('members')
errors = field.check(from_model=Group)
expected = [
Error(
"The model is used as an intermediate model by "
"'invalid_models_tests.Group.members', but it does not have "
"a foreign key to 'Group' or 'Person'.",
obj=InvalidRelationship,
id='fields.E336',
),
]
self.assertEqual(errors, expected)
def test_missing_relationship_model(self):
class Person(models.Model):
pass
class Group(models.Model):
members = models.ManyToManyField('Person', through="MissingM2MModel")
field = Group._meta.get_field('members')
errors = field.check(from_model=Group)
expected = [
Error(
"Field specifies a many-to-many relation through model "
"'MissingM2MModel', which has not been installed.",
obj=field,
id='fields.E331',
),
]
self.assertEqual(errors, expected)
def test_missing_relationship_model_on_model_check(self):
class Person(models.Model):
pass
class Group(models.Model):
members = models.ManyToManyField('Person', through='MissingM2MModel')
self.assertEqual(Group.check(), [
Error(
"Field specifies a many-to-many relation through model "
"'MissingM2MModel', which has not been installed.",
obj=Group._meta.get_field('members'),
id='fields.E331',
),
])
@isolate_apps('invalid_models_tests')
def test_many_to_many_through_isolate_apps_model(self):
"""
#25723 - Through model registration lookup should be run against the
field's model registry.
"""
class GroupMember(models.Model):
person = models.ForeignKey('Person', models.CASCADE)
group = models.ForeignKey('Group', models.CASCADE)
class Person(models.Model):
pass
class Group(models.Model):
members = models.ManyToManyField('Person', through='GroupMember')
field = Group._meta.get_field('members')
self.assertEqual(field.check(from_model=Group), [])
def test_symmetrical_self_referential_field(self):
class Person(models.Model):
# Implicit symmetrical=False.
friends = models.ManyToManyField('self', through="Relationship")
class Relationship(models.Model):
first = models.ForeignKey(Person, models.CASCADE, related_name="rel_from_set")
second = models.ForeignKey(Person, models.CASCADE, related_name="rel_to_set")
field = Person._meta.get_field('friends')
errors = field.check(from_model=Person)
expected = [
Error(
'Many-to-many fields with intermediate tables must not be symmetrical.',
obj=field,
id='fields.E332',
),
]
self.assertEqual(errors, expected)
def test_too_many_foreign_keys_in_self_referential_model(self):
class Person(models.Model):
friends = models.ManyToManyField('self', through="InvalidRelationship", symmetrical=False)
class InvalidRelationship(models.Model):
first = models.ForeignKey(Person, models.CASCADE, related_name="rel_from_set_2")
second = models.ForeignKey(Person, models.CASCADE, related_name="rel_to_set_2")
third = models.ForeignKey(Person, models.CASCADE, related_name="too_many_by_far")
field = Person._meta.get_field('friends')
errors = field.check(from_model=Person)
expected = [
Error(
"The model is used as an intermediate model by "
"'invalid_models_tests.Person.friends', but it has more than two "
"foreign keys to 'Person', which is ambiguous. You must specify "
"which two foreign keys Django should use via the through_fields "
"keyword argument.",
hint='Use through_fields to specify which two foreign keys Django should use.',
obj=InvalidRelationship,
id='fields.E333',
),
]
self.assertEqual(errors, expected)
def test_symmetric_self_reference_with_intermediate_table(self):
class Person(models.Model):
# Explicit symmetrical=True.
friends = models.ManyToManyField('self', through="Relationship", symmetrical=True)
class Relationship(models.Model):
first = models.ForeignKey(Person, models.CASCADE, related_name="rel_from_set")
second = models.ForeignKey(Person, models.CASCADE, related_name="rel_to_set")
field = Person._meta.get_field('friends')
errors = field.check(from_model=Person)
expected = [
Error(
'Many-to-many fields with intermediate tables must not be symmetrical.',
obj=field,
id='fields.E332',
),
]
self.assertEqual(errors, expected)
def test_symmetric_self_reference_with_intermediate_table_and_through_fields(self):
"""
Using through_fields in a m2m with an intermediate model shouldn't
mask its incompatibility with symmetry.
"""
class Person(models.Model):
# Explicit symmetrical=True.
friends = models.ManyToManyField(
'self',
symmetrical=True,
through="Relationship",
through_fields=('first', 'second'),
)
class Relationship(models.Model):
first = models.ForeignKey(Person, models.CASCADE, related_name="rel_from_set")
second = models.ForeignKey(Person, models.CASCADE, related_name="rel_to_set")
referee = models.ForeignKey(Person, models.CASCADE, related_name="referred")
field = Person._meta.get_field('friends')
errors = field.check(from_model=Person)
expected = [
Error(
'Many-to-many fields with intermediate tables must not be symmetrical.',
obj=field,
id='fields.E332',
),
]
self.assertEqual(errors, expected)
def test_foreign_key_to_abstract_model(self):
class AbstractModel(models.Model):
class Meta:
abstract = True
class Model(models.Model):
rel_string_foreign_key = models.ForeignKey('AbstractModel', models.CASCADE)
rel_class_foreign_key = models.ForeignKey(AbstractModel, models.CASCADE)
fields = [
Model._meta.get_field('rel_string_foreign_key'),
Model._meta.get_field('rel_class_foreign_key'),
]
expected_error = Error(
"Field defines a relation with model 'AbstractModel', "
"which is either not installed, or is abstract.",
id='fields.E300',
)
for field in fields:
expected_error.obj = field
errors = field.check()
self.assertEqual(errors, [expected_error])
def test_m2m_to_abstract_model(self):
class AbstractModel(models.Model):
class Meta:
abstract = True
class Model(models.Model):
rel_string_m2m = models.ManyToManyField('AbstractModel')
rel_class_m2m = models.ManyToManyField(AbstractModel)
fields = [
Model._meta.get_field('rel_string_m2m'),
Model._meta.get_field('rel_class_m2m'),
]
expected_error = Error(
"Field defines a relation with model 'AbstractModel', "
"which is either not installed, or is abstract.",
id='fields.E300',
)
for field in fields:
expected_error.obj = field
errors = field.check(from_model=Model)
self.assertEqual(errors, [expected_error])
def test_unique_m2m(self):
class Person(models.Model):
name = models.CharField(max_length=5)
class Group(models.Model):
members = models.ManyToManyField('Person', unique=True)
field = Group._meta.get_field('members')
errors = field.check(from_model=Group)
expected = [
Error(
'ManyToManyFields cannot be unique.',
obj=field,
id='fields.E330',
),
]
self.assertEqual(errors, expected)
def test_foreign_key_to_non_unique_field(self):
class Target(models.Model):
bad = models.IntegerField() # No unique=True
class Model(models.Model):
foreign_key = models.ForeignKey('Target', models.CASCADE, to_field='bad')
field = Model._meta.get_field('foreign_key')
errors = field.check()
expected = [
Error(
"'Target.bad' must set unique=True because it is referenced by a foreign key.",
obj=field,
id='fields.E311',
),
]
self.assertEqual(errors, expected)
def test_foreign_key_to_non_unique_field_under_explicit_model(self):
class Target(models.Model):
bad = models.IntegerField()
class Model(models.Model):
field = models.ForeignKey(Target, models.CASCADE, to_field='bad')
field = Model._meta.get_field('field')
errors = field.check()
expected = [
Error(
"'Target.bad' must set unique=True because it is referenced by a foreign key.",
obj=field,
id='fields.E311',
),
]
self.assertEqual(errors, expected)
def test_foreign_object_to_non_unique_fields(self):
class Person(models.Model):
# Note that both fields are not unique.
country_id = models.IntegerField()
city_id = models.IntegerField()
class MMembership(models.Model):
person_country_id = models.IntegerField()
person_city_id = models.IntegerField()
person = models.ForeignObject(
Person,
on_delete=models.CASCADE,
from_fields=['person_country_id', 'person_city_id'],
to_fields=['country_id', 'city_id'],
)
field = MMembership._meta.get_field('person')
errors = field.check()
expected = [
Error(
"No subset of the fields 'country_id', 'city_id' on model 'Person' is unique.",
hint=(
"Add unique=True on any of those fields or add at least "
"a subset of them to a unique_together constraint."
),
obj=field,
id='fields.E310',
)
]
self.assertEqual(errors, expected)
def test_on_delete_set_null_on_non_nullable_field(self):
class Person(models.Model):
pass
class Model(models.Model):
foreign_key = models.ForeignKey('Person', models.SET_NULL)
field = Model._meta.get_field('foreign_key')
errors = field.check()
expected = [
Error(
'Field specifies on_delete=SET_NULL, but cannot be null.',
hint='Set null=True argument on the field, or change the on_delete rule.',
obj=field,
id='fields.E320',
),
]
self.assertEqual(errors, expected)
def test_on_delete_set_default_without_default_value(self):
class Person(models.Model):
pass
class Model(models.Model):
foreign_key = models.ForeignKey('Person', models.SET_DEFAULT)
field = Model._meta.get_field('foreign_key')
errors = field.check()
expected = [
Error(
'Field specifies on_delete=SET_DEFAULT, but has no default value.',
hint='Set a default value, or change the on_delete rule.',
obj=field,
id='fields.E321',
),
]
self.assertEqual(errors, expected)
@skipIfDBFeature('interprets_empty_strings_as_nulls')
def test_nullable_primary_key(self):
class Model(models.Model):
field = models.IntegerField(primary_key=True, null=True)
field = Model._meta.get_field('field')
errors = field.check()
expected = [
Error(
'Primary keys must not have null=True.',
hint='Set null=False on the field, or remove primary_key=True argument.',
obj=field,
id='fields.E007',
),
]
self.assertEqual(errors, expected)
def test_not_swapped_model(self):
class SwappableModel(models.Model):
# A model that can be, but isn't swapped out. References to this
# model should *not* raise any validation error.
class Meta:
swappable = 'TEST_SWAPPABLE_MODEL'
class Model(models.Model):
explicit_fk = models.ForeignKey(
SwappableModel,
models.CASCADE,
related_name='explicit_fk',
)
implicit_fk = models.ForeignKey(
'invalid_models_tests.SwappableModel',
models.CASCADE,
related_name='implicit_fk',
)
explicit_m2m = models.ManyToManyField(SwappableModel, related_name='explicit_m2m')
implicit_m2m = models.ManyToManyField(
'invalid_models_tests.SwappableModel',
related_name='implicit_m2m',
)
explicit_fk = Model._meta.get_field('explicit_fk')
self.assertEqual(explicit_fk.check(), [])
implicit_fk = Model._meta.get_field('implicit_fk')
self.assertEqual(implicit_fk.check(), [])
explicit_m2m = Model._meta.get_field('explicit_m2m')
self.assertEqual(explicit_m2m.check(from_model=Model), [])
implicit_m2m = Model._meta.get_field('implicit_m2m')
self.assertEqual(implicit_m2m.check(from_model=Model), [])
@override_settings(TEST_SWAPPED_MODEL='invalid_models_tests.Replacement')
def test_referencing_to_swapped_model(self):
class Replacement(models.Model):
pass
class SwappedModel(models.Model):
class Meta:
swappable = 'TEST_SWAPPED_MODEL'
class Model(models.Model):
explicit_fk = models.ForeignKey(
SwappedModel,
models.CASCADE,
related_name='explicit_fk',
)
implicit_fk = models.ForeignKey(
'invalid_models_tests.SwappedModel',
models.CASCADE,
related_name='implicit_fk',
)
explicit_m2m = models.ManyToManyField(SwappedModel, related_name='explicit_m2m')
implicit_m2m = models.ManyToManyField(
'invalid_models_tests.SwappedModel',
related_name='implicit_m2m',
)
fields = [
Model._meta.get_field('explicit_fk'),
Model._meta.get_field('implicit_fk'),
Model._meta.get_field('explicit_m2m'),
Model._meta.get_field('implicit_m2m'),
]
expected_error = Error(
("Field defines a relation with the model "
"'invalid_models_tests.SwappedModel', which has been swapped out."),
hint="Update the relation to point at 'settings.TEST_SWAPPED_MODEL'.",
id='fields.E301',
)
for field in fields:
expected_error.obj = field
errors = field.check(from_model=Model)
self.assertEqual(errors, [expected_error])
def test_related_field_has_invalid_related_name(self):
digit = 0
illegal_non_alphanumeric = '!'
whitespace = '\t'
invalid_related_names = [
'%s_begins_with_digit' % digit,
'%s_begins_with_illegal_non_alphanumeric' % illegal_non_alphanumeric,
'%s_begins_with_whitespace' % whitespace,
'contains_%s_illegal_non_alphanumeric' % illegal_non_alphanumeric,
'contains_%s_whitespace' % whitespace,
'ends_with_with_illegal_non_alphanumeric_%s' % illegal_non_alphanumeric,
'ends_with_whitespace_%s' % whitespace,
'with', # a Python keyword
'related_name\n',
'',
',', # non-ASCII
]
class Parent(models.Model):
pass
for invalid_related_name in invalid_related_names:
Child = type('Child%s' % invalid_related_name, (models.Model,), {
'parent': models.ForeignKey('Parent', models.CASCADE, related_name=invalid_related_name),
'__module__': Parent.__module__,
})
field = Child._meta.get_field('parent')
errors = Child.check()
expected = [
Error(
"The name '%s' is invalid related_name for field Child%s.parent"
% (invalid_related_name, invalid_related_name),
hint="Related name must be a valid Python identifier or end with a '+'",
obj=field,
id='fields.E306',
),
]
self.assertEqual(errors, expected)
def test_related_field_has_valid_related_name(self):
lowercase = 'a'
uppercase = 'A'
digit = 0
related_names = [
'%s_starts_with_lowercase' % lowercase,
'%s_starts_with_uppercase' % uppercase,
'_starts_with_underscore',
'contains_%s_digit' % digit,
'ends_with_plus+',
'_+',
'+',
'試',
'試驗+',
]
class Parent(models.Model):
pass
for related_name in related_names:
Child = type('Child%s' % related_name, (models.Model,), {
'parent': models.ForeignKey('Parent', models.CASCADE, related_name=related_name),
'__module__': Parent.__module__,
})
errors = Child.check()
self.assertFalse(errors)
def test_to_fields_exist(self):
class Parent(models.Model):
pass
class Child(models.Model):
a = models.PositiveIntegerField()
b = models.PositiveIntegerField()
parent = ForeignObject(
Parent,
on_delete=models.SET_NULL,
from_fields=('a', 'b'),
to_fields=('a', 'b'),
)
field = Child._meta.get_field('parent')
expected = [
Error(
"The to_field 'a' doesn't exist on the related model 'invalid_models_tests.Parent'.",
obj=field,
id='fields.E312',
),
Error(
"The to_field 'b' doesn't exist on the related model 'invalid_models_tests.Parent'.",
obj=field,
id='fields.E312',
),
]
self.assertEqual(field.check(), expected)
def test_to_fields_not_checked_if_related_model_doesnt_exist(self):
class Child(models.Model):
a = models.PositiveIntegerField()
b = models.PositiveIntegerField()
parent = ForeignObject(
'invalid_models_tests.Parent',
on_delete=models.SET_NULL,
from_fields=('a', 'b'),
to_fields=('a', 'b'),
)
field = Child._meta.get_field('parent')
self.assertEqual(field.check(), [
Error(
"Field defines a relation with model 'invalid_models_tests.Parent', "
"which is either not installed, or is abstract.",
id='fields.E300',
obj=field,
),
])
def test_invalid_related_query_name(self):
class Target(models.Model):
pass
class Model(models.Model):
first = models.ForeignKey(Target, models.CASCADE, related_name='contains__double')
second = models.ForeignKey(Target, models.CASCADE, related_query_name='ends_underscore_')
self.assertEqual(Model.check(), [
Error(
"Reverse query name 'contains__double' must not contain '__'.",
hint=("Add or change a related_name or related_query_name "
"argument for this field."),
obj=Model._meta.get_field('first'),
id='fields.E309',
),
Error(
"Reverse query name 'ends_underscore_' must not end with an "
"underscore.",
hint=("Add or change a related_name or related_query_name "
"argument for this field."),
obj=Model._meta.get_field('second'),
id='fields.E308',
),
])
@isolate_apps('invalid_models_tests')
class AccessorClashTests(SimpleTestCase):
def test_fk_to_integer(self):
self._test_accessor_clash(
target=models.IntegerField(),
relative=models.ForeignKey('Target', models.CASCADE))
def test_fk_to_fk(self):
self._test_accessor_clash(
target=models.ForeignKey('Another', models.CASCADE),
relative=models.ForeignKey('Target', models.CASCADE))
def test_fk_to_m2m(self):
self._test_accessor_clash(
target=models.ManyToManyField('Another'),
relative=models.ForeignKey('Target', models.CASCADE))
def test_m2m_to_integer(self):
self._test_accessor_clash(
target=models.IntegerField(),
relative=models.ManyToManyField('Target'))
def test_m2m_to_fk(self):
self._test_accessor_clash(
target=models.ForeignKey('Another', models.CASCADE),
relative=models.ManyToManyField('Target'))
def test_m2m_to_m2m(self):
self._test_accessor_clash(
target=models.ManyToManyField('Another'),
relative=models.ManyToManyField('Target'))
def _test_accessor_clash(self, target, relative):
class Another(models.Model):
pass
class Target(models.Model):
model_set = target
class Model(models.Model):
rel = relative
errors = Model.check()
expected = [
Error(
"Reverse accessor for 'Model.rel' clashes with field name 'Target.model_set'.",
hint=("Rename field 'Target.model_set', or add/change "
"a related_name argument to the definition "
"for field 'Model.rel'."),
obj=Model._meta.get_field('rel'),
id='fields.E302',
),
]
self.assertEqual(errors, expected)
def test_clash_between_accessors(self):
class Target(models.Model):
pass
class Model(models.Model):
foreign = models.ForeignKey(Target, models.CASCADE)
m2m = models.ManyToManyField(Target)
errors = Model.check()
expected = [
Error(
"Reverse accessor for 'Model.foreign' clashes with reverse accessor for 'Model.m2m'.",
hint=(
"Add or change a related_name argument to the definition "
"for 'Model.foreign' or 'Model.m2m'."
),
obj=Model._meta.get_field('foreign'),
id='fields.E304',
),
Error(
"Reverse accessor for 'Model.m2m' clashes with reverse accessor for 'Model.foreign'.",
hint=(
"Add or change a related_name argument to the definition "
"for 'Model.m2m' or 'Model.foreign'."
),
obj=Model._meta.get_field('m2m'),
id='fields.E304',
),
]
self.assertEqual(errors, expected)
def test_m2m_to_m2m_with_inheritance(self):
""" Ref #22047. """
class Target(models.Model):
pass
class Model(models.Model):
children = models.ManyToManyField('Child', related_name="m2m_clash", related_query_name="no_clash")
class Parent(models.Model):
m2m_clash = models.ManyToManyField('Target')
class Child(Parent):
pass
errors = Model.check()
expected = [
Error(
"Reverse accessor for 'Model.children' clashes with field name 'Child.m2m_clash'.",
hint=(
"Rename field 'Child.m2m_clash', or add/change a related_name "
"argument to the definition for field 'Model.children'."
),
obj=Model._meta.get_field('children'),
id='fields.E302',
)
]
self.assertEqual(errors, expected)
def test_no_clash_for_hidden_related_name(self):
class Stub(models.Model):
pass
class ManyToManyRel(models.Model):
thing1 = models.ManyToManyField(Stub, related_name='+')
thing2 = models.ManyToManyField(Stub, related_name='+')
class FKRel(models.Model):
thing1 = models.ForeignKey(Stub, models.CASCADE, related_name='+')
thing2 = models.ForeignKey(Stub, models.CASCADE, related_name='+')
self.assertEqual(ManyToManyRel.check(), [])
self.assertEqual(FKRel.check(), [])
@isolate_apps('invalid_models_tests')
class ReverseQueryNameClashTests(SimpleTestCase):
def test_fk_to_integer(self):
self._test_reverse_query_name_clash(
target=models.IntegerField(),
relative=models.ForeignKey('Target', models.CASCADE))
def test_fk_to_fk(self):
self._test_reverse_query_name_clash(
target=models.ForeignKey('Another', models.CASCADE),
relative=models.ForeignKey('Target', models.CASCADE))
def test_fk_to_m2m(self):
self._test_reverse_query_name_clash(
target=models.ManyToManyField('Another'),
relative=models.ForeignKey('Target', models.CASCADE))
def test_m2m_to_integer(self):
self._test_reverse_query_name_clash(
target=models.IntegerField(),
relative=models.ManyToManyField('Target'))
def test_m2m_to_fk(self):
self._test_reverse_query_name_clash(
target=models.ForeignKey('Another', models.CASCADE),
relative=models.ManyToManyField('Target'))
def test_m2m_to_m2m(self):
self._test_reverse_query_name_clash(
target=models.ManyToManyField('Another'),
relative=models.ManyToManyField('Target'))
def _test_reverse_query_name_clash(self, target, relative):
class Another(models.Model):
pass
class Target(models.Model):
model = target
class Model(models.Model):
rel = relative
errors = Model.check()
expected = [
Error(
"Reverse query name for 'Model.rel' clashes with field name 'Target.model'.",
hint=(
"Rename field 'Target.model', or add/change a related_name "
"argument to the definition for field 'Model.rel'."
),
obj=Model._meta.get_field('rel'),
id='fields.E303',
),
]
self.assertEqual(errors, expected)
@isolate_apps('invalid_models_tests')
class ExplicitRelatedNameClashTests(SimpleTestCase):
def test_fk_to_integer(self):
self._test_explicit_related_name_clash(
target=models.IntegerField(),
relative=models.ForeignKey('Target', models.CASCADE, related_name='clash'))
def test_fk_to_fk(self):
self._test_explicit_related_name_clash(
target=models.ForeignKey('Another', models.CASCADE),
relative=models.ForeignKey('Target', models.CASCADE, related_name='clash'))
def test_fk_to_m2m(self):
self._test_explicit_related_name_clash(
target=models.ManyToManyField('Another'),
relative=models.ForeignKey('Target', models.CASCADE, related_name='clash'))
def test_m2m_to_integer(self):
self._test_explicit_related_name_clash(
target=models.IntegerField(),
relative=models.ManyToManyField('Target', related_name='clash'))
def test_m2m_to_fk(self):
self._test_explicit_related_name_clash(
target=models.ForeignKey('Another', models.CASCADE),
relative=models.ManyToManyField('Target', related_name='clash'))
def test_m2m_to_m2m(self):
self._test_explicit_related_name_clash(
target=models.ManyToManyField('Another'),
relative=models.ManyToManyField('Target', related_name='clash'))
def _test_explicit_related_name_clash(self, target, relative):
class Another(models.Model):
pass
class Target(models.Model):
clash = target
class Model(models.Model):
rel = relative
errors = Model.check()
expected = [
Error(
"Reverse accessor for 'Model.rel' clashes with field name 'Target.clash'.",
hint=(
"Rename field 'Target.clash', or add/change a related_name "
"argument to the definition for field 'Model.rel'."
),
obj=Model._meta.get_field('rel'),
id='fields.E302',
),
Error(
"Reverse query name for 'Model.rel' clashes with field name 'Target.clash'.",
hint=(
"Rename field 'Target.clash', or add/change a related_name "
"argument to the definition for field 'Model.rel'."
),
obj=Model._meta.get_field('rel'),
id='fields.E303',
),
]
self.assertEqual(errors, expected)
@isolate_apps('invalid_models_tests')
class ExplicitRelatedQueryNameClashTests(SimpleTestCase):
def test_fk_to_integer(self, related_name=None):
self._test_explicit_related_query_name_clash(
target=models.IntegerField(),
relative=models.ForeignKey(
'Target',
models.CASCADE,
related_name=related_name,
related_query_name='clash',
)
)
def test_hidden_fk_to_integer(self, related_name=None):
self.test_fk_to_integer(related_name='+')
def test_fk_to_fk(self, related_name=None):
self._test_explicit_related_query_name_clash(
target=models.ForeignKey('Another', models.CASCADE),
relative=models.ForeignKey(
'Target',
models.CASCADE,
related_name=related_name,
related_query_name='clash',
)
)
def test_hidden_fk_to_fk(self):
self.test_fk_to_fk(related_name='+')
def test_fk_to_m2m(self, related_name=None):
self._test_explicit_related_query_name_clash(
target=models.ManyToManyField('Another'),
relative=models.ForeignKey(
'Target',
models.CASCADE,
related_name=related_name,
related_query_name='clash',
)
)
def test_hidden_fk_to_m2m(self):
self.test_fk_to_m2m(related_name='+')
def test_m2m_to_integer(self, related_name=None):
self._test_explicit_related_query_name_clash(
target=models.IntegerField(),
relative=models.ManyToManyField('Target', related_name=related_name, related_query_name='clash'))
def test_hidden_m2m_to_integer(self):
self.test_m2m_to_integer(related_name='+')
def test_m2m_to_fk(self, related_name=None):
self._test_explicit_related_query_name_clash(
target=models.ForeignKey('Another', models.CASCADE),
relative=models.ManyToManyField('Target', related_name=related_name, related_query_name='clash'))
def test_hidden_m2m_to_fk(self):
self.test_m2m_to_fk(related_name='+')
def test_m2m_to_m2m(self, related_name=None):
self._test_explicit_related_query_name_clash(
target=models.ManyToManyField('Another'),
relative=models.ManyToManyField(
'Target',
related_name=related_name,
related_query_name='clash',
)
)
def test_hidden_m2m_to_m2m(self):
self.test_m2m_to_m2m(related_name='+')
def _test_explicit_related_query_name_clash(self, target, relative):
class Another(models.Model):
pass
class Target(models.Model):
clash = target
class Model(models.Model):
rel = relative
errors = Model.check()
expected = [
Error(
"Reverse query name for 'Model.rel' clashes with field name 'Target.clash'.",
hint=(
"Rename field 'Target.clash', or add/change a related_name "
"argument to the definition for field 'Model.rel'."
),
obj=Model._meta.get_field('rel'),
id='fields.E303',
),
]
self.assertEqual(errors, expected)
@isolate_apps('invalid_models_tests')
class SelfReferentialM2MClashTests(SimpleTestCase):
def test_clash_between_accessors(self):
class Model(models.Model):
first_m2m = models.ManyToManyField('self', symmetrical=False)
second_m2m = models.ManyToManyField('self', symmetrical=False)
errors = Model.check()
expected = [
Error(
"Reverse accessor for 'Model.first_m2m' clashes with reverse accessor for 'Model.second_m2m'.",
hint=(
"Add or change a related_name argument to the definition "
"for 'Model.first_m2m' or 'Model.second_m2m'."
),
obj=Model._meta.get_field('first_m2m'),
id='fields.E304',
),
Error(
"Reverse accessor for 'Model.second_m2m' clashes with reverse accessor for 'Model.first_m2m'.",
hint=(
"Add or change a related_name argument to the definition "
"for 'Model.second_m2m' or 'Model.first_m2m'."
),
obj=Model._meta.get_field('second_m2m'),
id='fields.E304',
),
]
self.assertEqual(errors, expected)
def test_accessor_clash(self):
class Model(models.Model):
model_set = models.ManyToManyField("self", symmetrical=False)
errors = Model.check()
expected = [
Error(
"Reverse accessor for 'Model.model_set' clashes with field name 'Model.model_set'.",
hint=(
"Rename field 'Model.model_set', or add/change a related_name "
"argument to the definition for field 'Model.model_set'."
),
obj=Model._meta.get_field('model_set'),
id='fields.E302',
),
]
self.assertEqual(errors, expected)
def test_reverse_query_name_clash(self):
class Model(models.Model):
model = models.ManyToManyField("self", symmetrical=False)
errors = Model.check()
expected = [
Error(
"Reverse query name for 'Model.model' clashes with field name 'Model.model'.",
hint=(
"Rename field 'Model.model', or add/change a related_name "
"argument to the definition for field 'Model.model'."
),
obj=Model._meta.get_field('model'),
id='fields.E303',
),
]
self.assertEqual(errors, expected)
def test_clash_under_explicit_related_name(self):
class Model(models.Model):
clash = models.IntegerField()
m2m = models.ManyToManyField("self", symmetrical=False, related_name='clash')
errors = Model.check()
expected = [
Error(
"Reverse accessor for 'Model.m2m' clashes with field name 'Model.clash'.",
hint=(
"Rename field 'Model.clash', or add/change a related_name "
"argument to the definition for field 'Model.m2m'."
),
obj=Model._meta.get_field('m2m'),
id='fields.E302',
),
Error(
"Reverse query name for 'Model.m2m' clashes with field name 'Model.clash'.",
hint=(
"Rename field 'Model.clash', or add/change a related_name "
"argument to the definition for field 'Model.m2m'."
),
obj=Model._meta.get_field('m2m'),
id='fields.E303',
),
]
self.assertEqual(errors, expected)
def test_valid_model(self):
class Model(models.Model):
first = models.ManyToManyField("self", symmetrical=False, related_name='first_accessor')
second = models.ManyToManyField("self", symmetrical=False, related_name='second_accessor')
errors = Model.check()
self.assertEqual(errors, [])
@isolate_apps('invalid_models_tests')
class SelfReferentialFKClashTests(SimpleTestCase):
def test_accessor_clash(self):
class Model(models.Model):
model_set = models.ForeignKey("Model", models.CASCADE)
errors = Model.check()
expected = [
Error(
"Reverse accessor for 'Model.model_set' clashes with field name 'Model.model_set'.",
hint=(
"Rename field 'Model.model_set', or add/change "
"a related_name argument to the definition "
"for field 'Model.model_set'."
),
obj=Model._meta.get_field('model_set'),
id='fields.E302',
),
]
self.assertEqual(errors, expected)
def test_reverse_query_name_clash(self):
class Model(models.Model):
model = models.ForeignKey("Model", models.CASCADE)
errors = Model.check()
expected = [
Error(
"Reverse query name for 'Model.model' clashes with field name 'Model.model'.",
hint=(
"Rename field 'Model.model', or add/change a related_name "
"argument to the definition for field 'Model.model'."
),
obj=Model._meta.get_field('model'),
id='fields.E303',
),
]
self.assertEqual(errors, expected)
def test_clash_under_explicit_related_name(self):
class Model(models.Model):
clash = models.CharField(max_length=10)
foreign = models.ForeignKey("Model", models.CASCADE, related_name='clash')
errors = Model.check()
expected = [
Error(
"Reverse accessor for 'Model.foreign' clashes with field name 'Model.clash'.",
hint=(
"Rename field 'Model.clash', or add/change a related_name "
"argument to the definition for field 'Model.foreign'."
),
obj=Model._meta.get_field('foreign'),
id='fields.E302',
),
Error(
"Reverse query name for 'Model.foreign' clashes with field name 'Model.clash'.",
hint=(
"Rename field 'Model.clash', or add/change a related_name "
"argument to the definition for field 'Model.foreign'."
),
obj=Model._meta.get_field('foreign'),
id='fields.E303',
),
]
self.assertEqual(errors, expected)
@isolate_apps('invalid_models_tests')
class ComplexClashTests(SimpleTestCase):
# New tests should not be included here, because this is a single,
# self-contained sanity check, not a test of everything.
def test_complex_clash(self):
class Target(models.Model):
tgt_safe = models.CharField(max_length=10)
clash = models.CharField(max_length=10)
model = models.CharField(max_length=10)
clash1_set = models.CharField(max_length=10)
class Model(models.Model):
src_safe = models.CharField(max_length=10)
foreign_1 = models.ForeignKey(Target, models.CASCADE, related_name='id')
foreign_2 = models.ForeignKey(Target, models.CASCADE, related_name='src_safe')
m2m_1 = models.ManyToManyField(Target, related_name='id')
m2m_2 = models.ManyToManyField(Target, related_name='src_safe')
errors = Model.check()
expected = [
Error(
"Reverse accessor for 'Model.foreign_1' clashes with field name 'Target.id'.",
hint=("Rename field 'Target.id', or add/change a related_name "
"argument to the definition for field 'Model.foreign_1'."),
obj=Model._meta.get_field('foreign_1'),
id='fields.E302',
),
Error(
"Reverse query name for 'Model.foreign_1' clashes with field name 'Target.id'.",
hint=("Rename field 'Target.id', or add/change a related_name "
"argument to the definition for field 'Model.foreign_1'."),
obj=Model._meta.get_field('foreign_1'),
id='fields.E303',
),
Error(
"Reverse accessor for 'Model.foreign_1' clashes with reverse accessor for 'Model.m2m_1'.",
hint=("Add or change a related_name argument to "
"the definition for 'Model.foreign_1' or 'Model.m2m_1'."),
obj=Model._meta.get_field('foreign_1'),
id='fields.E304',
),
Error(
"Reverse query name for 'Model.foreign_1' clashes with reverse query name for 'Model.m2m_1'.",
hint=("Add or change a related_name argument to "
"the definition for 'Model.foreign_1' or 'Model.m2m_1'."),
obj=Model._meta.get_field('foreign_1'),
id='fields.E305',
),
Error(
"Reverse accessor for 'Model.foreign_2' clashes with reverse accessor for 'Model.m2m_2'.",
hint=("Add or change a related_name argument "
"to the definition for 'Model.foreign_2' or 'Model.m2m_2'."),
obj=Model._meta.get_field('foreign_2'),
id='fields.E304',
),
Error(
"Reverse query name for 'Model.foreign_2' clashes with reverse query name for 'Model.m2m_2'.",
hint=("Add or change a related_name argument to "
"the definition for 'Model.foreign_2' or 'Model.m2m_2'."),
obj=Model._meta.get_field('foreign_2'),
id='fields.E305',
),
Error(
"Reverse accessor for 'Model.m2m_1' clashes with field name 'Target.id'.",
hint=("Rename field 'Target.id', or add/change a related_name "
"argument to the definition for field 'Model.m2m_1'."),
obj=Model._meta.get_field('m2m_1'),
id='fields.E302',
),
Error(
"Reverse query name for 'Model.m2m_1' clashes with field name 'Target.id'.",
hint=("Rename field 'Target.id', or add/change a related_name "
"argument to the definition for field 'Model.m2m_1'."),
obj=Model._meta.get_field('m2m_1'),
id='fields.E303',
),
Error(
"Reverse accessor for 'Model.m2m_1' clashes with reverse accessor for 'Model.foreign_1'.",
hint=("Add or change a related_name argument to the definition "
"for 'Model.m2m_1' or 'Model.foreign_1'."),
obj=Model._meta.get_field('m2m_1'),
id='fields.E304',
),
Error(
"Reverse query name for 'Model.m2m_1' clashes with reverse query name for 'Model.foreign_1'.",
hint=("Add or change a related_name argument to "
"the definition for 'Model.m2m_1' or 'Model.foreign_1'."),
obj=Model._meta.get_field('m2m_1'),
id='fields.E305',
),
Error(
"Reverse accessor for 'Model.m2m_2' clashes with reverse accessor for 'Model.foreign_2'.",
hint=("Add or change a related_name argument to the definition "
"for 'Model.m2m_2' or 'Model.foreign_2'."),
obj=Model._meta.get_field('m2m_2'),
id='fields.E304',
),
Error(
"Reverse query name for 'Model.m2m_2' clashes with reverse query name for 'Model.foreign_2'.",
hint=("Add or change a related_name argument to the definition "
"for 'Model.m2m_2' or 'Model.foreign_2'."),
obj=Model._meta.get_field('m2m_2'),
id='fields.E305',
),
]
self.assertEqual(errors, expected)
@isolate_apps('invalid_models_tests')
class M2mThroughFieldsTests(SimpleTestCase):
def test_m2m_field_argument_validation(self):
"""
ManyToManyField accepts the ``through_fields`` kwarg
only if an intermediary table is specified.
"""
class Fan(models.Model):
pass
with self.assertRaisesMessage(ValueError, 'Cannot specify through_fields without a through model'):
models.ManyToManyField(Fan, through_fields=('f1', 'f2'))
def test_invalid_order(self):
"""
Mixing up the order of link fields to ManyToManyField.through_fields
triggers validation errors.
"""
class Fan(models.Model):
pass
class Event(models.Model):
invitees = models.ManyToManyField(Fan, through='Invitation', through_fields=('invitee', 'event'))
class Invitation(models.Model):
event = models.ForeignKey(Event, models.CASCADE)
invitee = models.ForeignKey(Fan, models.CASCADE)
inviter = models.ForeignKey(Fan, models.CASCADE, related_name='+')
field = Event._meta.get_field('invitees')
errors = field.check(from_model=Event)
expected = [
Error(
"'Invitation.invitee' is not a foreign key to 'Event'.",
hint="Did you mean one of the following foreign keys to 'Event': event?",
obj=field,
id='fields.E339',
),
Error(
"'Invitation.event' is not a foreign key to 'Fan'.",
hint="Did you mean one of the following foreign keys to 'Fan': invitee, inviter?",
obj=field,
id='fields.E339',
),
]
self.assertEqual(expected, errors)
def test_invalid_field(self):
"""
Providing invalid field names to ManyToManyField.through_fields
triggers validation errors.
"""
class Fan(models.Model):
pass
class Event(models.Model):
invitees = models.ManyToManyField(
Fan,
through='Invitation',
through_fields=('invalid_field_1', 'invalid_field_2'),
)
class Invitation(models.Model):
event = models.ForeignKey(Event, models.CASCADE)
invitee = models.ForeignKey(Fan, models.CASCADE)
inviter = models.ForeignKey(Fan, models.CASCADE, related_name='+')
field = Event._meta.get_field('invitees')
errors = field.check(from_model=Event)
expected = [
Error(
"The intermediary model 'invalid_models_tests.Invitation' has no field 'invalid_field_1'.",
hint="Did you mean one of the following foreign keys to 'Event': event?",
obj=field,
id='fields.E338',
),
Error(
"The intermediary model 'invalid_models_tests.Invitation' has no field 'invalid_field_2'.",
hint="Did you mean one of the following foreign keys to 'Fan': invitee, inviter?",
obj=field,
id='fields.E338',
),
]
self.assertEqual(expected, errors)
def test_explicit_field_names(self):
"""
If ``through_fields`` kwarg is given, it must specify both
link fields of the intermediary table.
"""
class Fan(models.Model):
pass
class Event(models.Model):
invitees = models.ManyToManyField(Fan, through='Invitation', through_fields=(None, 'invitee'))
class Invitation(models.Model):
event = models.ForeignKey(Event, models.CASCADE)
invitee = models.ForeignKey(Fan, models.CASCADE)
inviter = models.ForeignKey(Fan, models.CASCADE, related_name='+')
field = Event._meta.get_field('invitees')
errors = field.check(from_model=Event)
expected = [
Error(
"Field specifies 'through_fields' but does not provide the names "
"of the two link fields that should be used for the relation "
"through model 'invalid_models_tests.Invitation'.",
hint="Make sure you specify 'through_fields' as through_fields=('field1', 'field2')",
obj=field,
id='fields.E337')]
self.assertEqual(expected, errors)
def test_superset_foreign_object(self):
class Parent(models.Model):
a = models.PositiveIntegerField()
b = models.PositiveIntegerField()
c = models.PositiveIntegerField()
class Meta:
unique_together = (('a', 'b', 'c'),)
class Child(models.Model):
a = models.PositiveIntegerField()
b = models.PositiveIntegerField()
value = models.CharField(max_length=255)
parent = ForeignObject(
Parent,
on_delete=models.SET_NULL,
from_fields=('a', 'b'),
to_fields=('a', 'b'),
related_name='children',
)
field = Child._meta.get_field('parent')
errors = field.check(from_model=Child)
expected = [
Error(
"No subset of the fields 'a', 'b' on model 'Parent' is unique.",
hint=(
"Add unique=True on any of those fields or add at least "
"a subset of them to a unique_together constraint."
),
obj=field,
id='fields.E310',
),
]
self.assertEqual(expected, errors)
def test_intersection_foreign_object(self):
class Parent(models.Model):
a = models.PositiveIntegerField()
b = models.PositiveIntegerField()
c = models.PositiveIntegerField()
d = models.PositiveIntegerField()
class Meta:
unique_together = (('a', 'b', 'c'),)
class Child(models.Model):
a = models.PositiveIntegerField()
b = models.PositiveIntegerField()
d = models.PositiveIntegerField()
value = models.CharField(max_length=255)
parent = ForeignObject(
Parent,
on_delete=models.SET_NULL,
from_fields=('a', 'b', 'd'),
to_fields=('a', 'b', 'd'),
related_name='children',
)
field = Child._meta.get_field('parent')
errors = field.check(from_model=Child)
expected = [
Error(
"No subset of the fields 'a', 'b', 'd' on model 'Parent' is unique.",
hint=(
"Add unique=True on any of those fields or add at least "
"a subset of them to a unique_together constraint."
),
obj=field,
id='fields.E310',
),
]
self.assertEqual(expected, errors)
| bsd-3-clause | -7,992,100,837,373,469,000 | 1,355,754,971,578,557,000 | 36.555968 | 111 | 0.550224 | false |
Arakmar/Sick-Beard | cherrypy/process/wspbus.py | 45 | 14462 | """An implementation of the Web Site Process Bus.
This module is completely standalone, depending only on the stdlib.
Web Site Process Bus
--------------------
A Bus object is used to contain and manage site-wide behavior:
daemonization, HTTP server start/stop, process reload, signal handling,
drop privileges, PID file management, logging for all of these,
and many more.
In addition, a Bus object provides a place for each web framework
to register code that runs in response to site-wide events (like
process start and stop), or which controls or otherwise interacts with
the site-wide components mentioned above. For example, a framework which
uses file-based templates would add known template filenames to an
autoreload component.
Ideally, a Bus object will be flexible enough to be useful in a variety
of invocation scenarios:
1. The deployer starts a site from the command line via a framework-
neutral deployment script; applications from multiple frameworks
are mixed in a single site. Command-line arguments and configuration
files are used to define site-wide components such as the HTTP server,
WSGI component graph, autoreload behavior, signal handling, etc.
2. The deployer starts a site via some other process, such as Apache;
applications from multiple frameworks are mixed in a single site.
Autoreload and signal handling (from Python at least) are disabled.
3. The deployer starts a site via a framework-specific mechanism;
for example, when running tests, exploring tutorials, or deploying
single applications from a single framework. The framework controls
which site-wide components are enabled as it sees fit.
The Bus object in this package uses topic-based publish-subscribe
messaging to accomplish all this. A few topic channels are built in
('start', 'stop', 'exit', 'graceful', 'log', and 'main'). Frameworks and
site containers are free to define their own. If a message is sent to a
channel that has not been defined or has no listeners, there is no effect.
In general, there should only ever be a single Bus object per process.
Frameworks and site containers share a single Bus object by publishing
messages and subscribing listeners.
The Bus object works as a finite state machine which models the current
state of the process. Bus methods move it from one state to another;
those methods then publish to subscribed listeners on the channel for
the new state.
                        O
                        |
                        V
       STOPPING --> STOPPED --> EXITING -> X
          A   A         |
          |    \___     |
          |        \    |
          |         V   V
        STARTED <-- STARTING
"""
import atexit
import os
try:
set
except NameError:
from sets import Set as set
import sys
import threading
import time
import traceback as _traceback
import warnings
# Here I save the value of os.getcwd(), which, if I am imported early enough,
# will be the directory from which the startup script was run. This is needed
# by _do_execv(), to change back to the original directory before execv()ing a
# new process. This is a defense against the application having changed the
# current working directory (which could make sys.executable "not found" if
# sys.executable is a relative-path, and/or cause other problems).
_startup_cwd = os.getcwd()
class ChannelFailures(Exception):
delimiter = '\n'
def __init__(self, *args, **kwargs):
# Don't use 'super' here; Exceptions are old-style in Py2.4
# See http://www.cherrypy.org/ticket/959
Exception.__init__(self, *args, **kwargs)
self._exceptions = list()
def handle_exception(self):
self._exceptions.append(sys.exc_info())
def get_instances(self):
return [instance for cls, instance, traceback in self._exceptions]
def __str__(self):
exception_strings = map(repr, self.get_instances())
return self.delimiter.join(exception_strings)
def __nonzero__(self):
return bool(self._exceptions)
# Use a flag to indicate the state of the bus.
class _StateEnum(object):
class State(object):
name = None
def __repr__(self):
return "states.%s" % self.name
def __setattr__(self, key, value):
if isinstance(value, self.State):
value.name = key
object.__setattr__(self, key, value)
states = _StateEnum()
states.STOPPED = states.State()
states.STARTING = states.State()
states.STARTED = states.State()
states.STOPPING = states.State()
states.EXITING = states.State()
class Bus(object):
"""Process state-machine and messenger for HTTP site deployment.
All listeners for a given channel are guaranteed to be called even
if others at the same channel fail. Each failure is logged, but
execution proceeds on to the next listener. The only way to stop all
processing from inside a listener is to raise SystemExit and stop the
whole server.
"""
states = states
state = states.STOPPED
execv = False
def __init__(self):
self.execv = False
self.state = states.STOPPED
self.listeners = dict(
[(channel, set()) for channel
in ('start', 'stop', 'exit', 'graceful', 'log', 'main')])
self._priorities = {}
def subscribe(self, channel, callback, priority=None):
"""Add the given callback at the given channel (if not present)."""
if channel not in self.listeners:
self.listeners[channel] = set()
self.listeners[channel].add(callback)
if priority is None:
priority = getattr(callback, 'priority', 50)
self._priorities[(channel, callback)] = priority
def unsubscribe(self, channel, callback):
"""Discard the given callback (if present)."""
listeners = self.listeners.get(channel)
if listeners and callback in listeners:
listeners.discard(callback)
del self._priorities[(channel, callback)]
def publish(self, channel, *args, **kwargs):
"""Return output of all subscribers for the given channel."""
if channel not in self.listeners:
return []
exc = ChannelFailures()
output = []
items = [(self._priorities[(channel, listener)], listener)
for listener in self.listeners[channel]]
items.sort()
for priority, listener in items:
try:
output.append(listener(*args, **kwargs))
except KeyboardInterrupt:
raise
except SystemExit, e:
# If we have previous errors ensure the exit code is non-zero
if exc and e.code == 0:
e.code = 1
raise
except:
exc.handle_exception()
if channel == 'log':
# Assume any further messages to 'log' will fail.
pass
else:
self.log("Error in %r listener %r" % (channel, listener),
level=40, traceback=True)
if exc:
raise exc
return output
def _clean_exit(self):
"""An atexit handler which asserts the Bus is not running."""
if self.state != states.EXITING:
warnings.warn(
"The main thread is exiting, but the Bus is in the %r state; "
"shutting it down automatically now. You must either call "
"bus.block() after start(), or call bus.exit() before the "
"main thread exits." % self.state, RuntimeWarning)
self.exit()
def start(self):
"""Start all services."""
atexit.register(self._clean_exit)
self.state = states.STARTING
self.log('Bus STARTING')
try:
self.publish('start')
self.state = states.STARTED
self.log('Bus STARTED')
except (KeyboardInterrupt, SystemExit):
raise
except:
self.log("Shutting down due to error in start listener:",
level=40, traceback=True)
e_info = sys.exc_info()
try:
self.exit()
except:
# Any stop/exit errors will be logged inside publish().
pass
raise e_info[0], e_info[1], e_info[2]
def exit(self):
"""Stop all services and prepare to exit the process."""
exitstate = self.state
try:
self.stop()
self.state = states.EXITING
self.log('Bus EXITING')
self.publish('exit')
# This isn't strictly necessary, but it's better than seeing
# "Waiting for child threads to terminate..." and then nothing.
self.log('Bus EXITED')
except:
# This method is often called asynchronously (whether thread,
# signal handler, console handler, or atexit handler), so we
# can't just let exceptions propagate out unhandled.
# Assume it's been logged and just die.
os._exit(70) # EX_SOFTWARE
if exitstate == states.STARTING:
# exit() was called before start() finished, possibly due to
# Ctrl-C because a start listener got stuck. In this case,
# we could get stuck in a loop where Ctrl-C never exits the
            # process, so we just call os._exit here.
os._exit(70) # EX_SOFTWARE
def restart(self):
"""Restart the process (may close connections).
This method does not restart the process from the calling thread;
instead, it stops the bus and asks the main thread to call execv.
"""
self.execv = True
self.exit()
def graceful(self):
"""Advise all services to reload."""
self.log('Bus graceful')
self.publish('graceful')
def block(self, interval=0.1):
"""Wait for the EXITING state, KeyboardInterrupt or SystemExit.
This function is intended to be called only by the main thread.
After waiting for the EXITING state, it also waits for all threads
to terminate, and then calls os.execv if self.execv is True. This
design allows another thread to call bus.restart, yet have the main
thread perform the actual execv call (required on some platforms).
"""
try:
self.wait(states.EXITING, interval=interval, channel='main')
except (KeyboardInterrupt, IOError):
# The time.sleep call might raise
# "IOError: [Errno 4] Interrupted function call" on KBInt.
self.log('Keyboard Interrupt: shutting down bus')
self.exit()
except SystemExit:
self.log('SystemExit raised: shutting down bus')
self.exit()
raise
# Waiting for ALL child threads to finish is necessary on OS X.
# See http://www.cherrypy.org/ticket/581.
# It's also good to let them all shut down before allowing
# the main thread to call atexit handlers.
# See http://www.cherrypy.org/ticket/751.
self.log("Waiting for child threads to terminate...")
for t in threading.enumerate():
if t != threading.currentThread() and t.isAlive():
# Note that any dummy (external) threads are always daemonic.
if hasattr(threading.Thread, "daemon"):
# Python 2.6+
d = t.daemon
else:
d = t.isDaemon()
if not d:
t.join()
if self.execv:
self._do_execv()
def wait(self, state, interval=0.1, channel=None):
"""Wait for the given state(s)."""
if isinstance(state, (tuple, list)):
states = state
else:
states = [state]
def _wait():
while self.state not in states:
time.sleep(interval)
self.publish(channel)
# From http://psyco.sourceforge.net/psycoguide/bugs.html:
# "The compiled machine code does not include the regular polling
# done by Python, meaning that a KeyboardInterrupt will not be
# detected before execution comes back to the regular Python
# interpreter. Your program cannot be interrupted if caught
# into an infinite Psyco-compiled loop."
try:
sys.modules['psyco'].cannotcompile(_wait)
except (KeyError, AttributeError):
pass
_wait()
def _do_execv(self):
"""Re-execute the current process.
This must be called from the main thread, because certain platforms
(OS X) don't allow execv to be called in a child thread very well.
"""
args = sys.argv[:]
self.log('Re-spawning %s' % ' '.join(args))
args.insert(0, sys.executable)
if sys.platform == 'win32':
args = ['"%s"' % arg for arg in args]
os.chdir(_startup_cwd)
os.execv(sys.executable, args)
def stop(self):
"""Stop all services."""
self.state = states.STOPPING
self.log('Bus STOPPING')
self.publish('stop')
self.state = states.STOPPED
self.log('Bus STOPPED')
def start_with_callback(self, func, args=None, kwargs=None):
"""Start 'func' in a new thread T, then start self (and return T)."""
if args is None:
args = ()
if kwargs is None:
kwargs = {}
args = (func,) + args
def _callback(func, *a, **kw):
self.wait(states.STARTED)
func(*a, **kw)
t = threading.Thread(target=_callback, args=args, kwargs=kwargs)
t.setName('Bus Callback ' + t.getName())
t.start()
self.start()
return t
def log(self, msg="", level=20, traceback=False):
"""Log the given message. Append the last traceback if requested."""
if traceback:
exc = sys.exc_info()
msg += "\n" + "".join(_traceback.format_exception(*exc))
self.publish('log', msg, level)
bus = Bus()
| gpl-3.0 | 439,441,644,083,089,000 | 1,544,189,613,833,875,000 | 36.661458 | 78 | 0.596875 | false |
Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_08_01/operations/_virtual_router_peerings_operations.py | 1 | 22472 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class VirtualRouterPeeringsOperations(object):
"""VirtualRouterPeeringsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_08_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
virtual_router_name, # type: str
peering_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualRouterName': self._serialize.url("virtual_router_name", virtual_router_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualRouters/{virtualRouterName}/peerings/{peeringName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
virtual_router_name, # type: str
peering_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified peering from a Virtual Router.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_router_name: The name of the Virtual Router.
:type virtual_router_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
virtual_router_name=virtual_router_name,
peering_name=peering_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualRouterName': self._serialize.url("virtual_router_name", virtual_router_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualRouters/{virtualRouterName}/peerings/{peeringName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
virtual_router_name, # type: str
peering_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.VirtualRouterPeering"
"""Gets the specified Virtual Router Peering.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_router_name: The name of the Virtual Router.
:type virtual_router_name: str
:param peering_name: The name of the Virtual Router Peering.
:type peering_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VirtualRouterPeering, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_08_01.models.VirtualRouterPeering
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualRouterPeering"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualRouterName': self._serialize.url("virtual_router_name", virtual_router_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualRouterPeering', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualRouters/{virtualRouterName}/peerings/{peeringName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
virtual_router_name, # type: str
peering_name, # type: str
parameters, # type: "_models.VirtualRouterPeering"
**kwargs # type: Any
):
# type: (...) -> "_models.VirtualRouterPeering"
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualRouterPeering"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualRouterName': self._serialize.url("virtual_router_name", virtual_router_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'VirtualRouterPeering')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('VirtualRouterPeering', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('VirtualRouterPeering', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualRouters/{virtualRouterName}/peerings/{peeringName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
virtual_router_name, # type: str
peering_name, # type: str
parameters, # type: "_models.VirtualRouterPeering"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.VirtualRouterPeering"]
"""Creates or updates the specified Virtual Router Peering.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_router_name: The name of the Virtual Router.
:type virtual_router_name: str
:param peering_name: The name of the Virtual Router Peering.
:type peering_name: str
:param parameters: Parameters supplied to the create or update Virtual Router Peering
operation.
:type parameters: ~azure.mgmt.network.v2020_08_01.models.VirtualRouterPeering
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either VirtualRouterPeering or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_08_01.models.VirtualRouterPeering]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualRouterPeering"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
virtual_router_name=virtual_router_name,
peering_name=peering_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VirtualRouterPeering', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualRouterName': self._serialize.url("virtual_router_name", virtual_router_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualRouters/{virtualRouterName}/peerings/{peeringName}'} # type: ignore
def list(
self,
resource_group_name, # type: str
virtual_router_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.VirtualRouterPeeringListResult"]
"""Lists all Virtual Router Peerings in a Virtual Router resource.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_router_name: The name of the Virtual Router.
:type virtual_router_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either VirtualRouterPeeringListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_08_01.models.VirtualRouterPeeringListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualRouterPeeringListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualRouterName': self._serialize.url("virtual_router_name", virtual_router_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('VirtualRouterPeeringListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.Error, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualRouters/{virtualRouterName}/peerings'} # type: ignore
| mit | -6,657,973,365,144,554,000 | 1,925,307,994,146,391,800 | 49.498876 | 220 | 0.643868 | false |
mynlp/ccg2lambda | scripts/run_tests.py | 1 | 3569 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# Copyright 2015 Pascual Martinez-Gomez
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from abduction_tools_test import GetPremisesThatMatchConclusionArgsTestCase
from abduction_tools_test import GetTreePredArgsTestCase
from category_test import CategoryTestCase
from ccg2lambda_tools_test import AssignSemanticsToCCGTestCase
from ccg2lambda_tools_test import AssignSemanticsToCCGWithFeatsTestCase
from ccg2lambda_tools_test import get_attributes_from_ccg_node_recursivelyTestCase
from ccg2lambda_tools_test import TypeRaiseTestCase
from knowledge_test import LexicalRelationsTestCase
from nltk2coq_test import Nltk2coqTestCase
from semantic_index_test import GetSemanticRepresentationTestCase
from semantic_tools_test import resolve_prefix_to_infix_operationsTestCase
from semantic_types_test import ArbiAutoTypesTestCase
from semantic_types_test import build_arbitrary_dynamic_libraryTestCase
from semantic_types_test import build_dynamic_libraryTestCase
from semantic_types_test import Coq2NLTKTypesTestCase
from semantic_types_test import Coq2NLTKSignaturesTestCase
from semantic_types_test import combine_signatures_or_rename_predsTestCase
if __name__ == '__main__':
suite1 = unittest.TestLoader().loadTestsFromTestCase(AssignSemanticsToCCGTestCase)
suite2 = unittest.TestLoader().loadTestsFromTestCase(AssignSemanticsToCCGWithFeatsTestCase)
suite3 = unittest.TestLoader().loadTestsFromTestCase(TypeRaiseTestCase)
suite4 = unittest.TestLoader().loadTestsFromTestCase(build_dynamic_libraryTestCase)
suite5 = unittest.TestLoader().loadTestsFromTestCase(resolve_prefix_to_infix_operationsTestCase)
suite6 = unittest.TestLoader().loadTestsFromTestCase(Nltk2coqTestCase)
suite7 = unittest.TestLoader().loadTestsFromTestCase(build_arbitrary_dynamic_libraryTestCase)
suite8 = unittest.TestLoader().loadTestsFromTestCase(LexicalRelationsTestCase)
suite9 = unittest.TestLoader().loadTestsFromTestCase(Coq2NLTKTypesTestCase)
suite10 = unittest.TestLoader().loadTestsFromTestCase(Coq2NLTKSignaturesTestCase)
suite11 = unittest.TestLoader().loadTestsFromTestCase(ArbiAutoTypesTestCase)
suite12 = unittest.TestLoader().loadTestsFromTestCase(get_attributes_from_ccg_node_recursivelyTestCase)
suite13 = unittest.TestLoader().loadTestsFromTestCase(GetSemanticRepresentationTestCase)
suite14 = unittest.TestLoader().loadTestsFromTestCase(GetTreePredArgsTestCase)
suite15 = unittest.TestLoader().loadTestsFromTestCase(GetPremisesThatMatchConclusionArgsTestCase)
suite16 = unittest.TestLoader().loadTestsFromTestCase(combine_signatures_or_rename_predsTestCase)
suite17 = unittest.TestLoader().loadTestsFromTestCase(CategoryTestCase)
suites = unittest.TestSuite([suite1, suite2, suite3, suite4, suite5, suite6,
suite7, suite8, suite9, suite10, suite11, suite12,
suite13, suite14, suite15, suite16, suite17])
unittest.TextTestRunner(verbosity=2).run(suites)
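# Example invocation (assumed to be run from the directory containing the
# *_test modules imported above so that they resolve on sys.path):
#   python run_tests.py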
| apache-2.0 | -4,183,059,230,529,001,000 | -7,863,441,287,393,614,000 | 59.491525 | 107 | 0.804147 | false |
apporc/neutron | neutron/extensions/extra_dhcp_opt.py | 5 | 3380 | # Copyright (c) 2013 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from neutron._i18n import _
from neutron.api import extensions
from neutron.api.v2 import attributes as attr
from neutron.common import exceptions
# ExtraDhcpOpts Exceptions
class ExtraDhcpOptNotFound(exceptions.NotFound):
message = _("ExtraDhcpOpt %(id)s could not be found")
class ExtraDhcpOptBadData(exceptions.InvalidInput):
message = _("Invalid data format for extra-dhcp-opt: %(data)s")
# Valid blank extra dhcp opts
VALID_BLANK_EXTRA_DHCP_OPTS = ('router', 'classless-static-route')
# Common definitions for maximum string field length
DHCP_OPT_NAME_MAX_LEN = 64
DHCP_OPT_VALUE_MAX_LEN = 255
EXTRA_DHCP_OPT_KEY_SPECS = {
'id': {'type:uuid': None, 'required': False},
'opt_name': {'type:not_empty_string': DHCP_OPT_NAME_MAX_LEN,
'required': True},
'opt_value': {'type:not_empty_string_or_none': DHCP_OPT_VALUE_MAX_LEN,
'required': True},
'ip_version': {'convert_to': attr.convert_to_int,
'type:values': [4, 6],
'required': False}
}
def _validate_extra_dhcp_opt(data, key_specs=None):
if data is not None:
if not isinstance(data, list):
raise ExtraDhcpOptBadData(data=data)
for d in data:
if d['opt_name'] in VALID_BLANK_EXTRA_DHCP_OPTS:
msg = attr._validate_string_or_none(d['opt_value'],
DHCP_OPT_VALUE_MAX_LEN)
else:
msg = attr._validate_dict(d, key_specs)
if msg:
raise ExtraDhcpOptBadData(data=msg)
attr.validators['type:list_of_extra_dhcp_opts'] = _validate_extra_dhcp_opt
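# --- Illustrative sketch, not part of the upstream module -------------------
# A hypothetical helper showing how the validator registered above behaves.
# The option names/values are example assumptions, not Neutron defaults.
def _example_extra_dhcp_opt_validation():
    good = [
        {'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0', 'ip_version': 4},
        # a blank value is accepted for names in VALID_BLANK_EXTRA_DHCP_OPTS
        {'opt_name': 'router', 'opt_value': None},
    ]
    _validate_extra_dhcp_opt(good, EXTRA_DHCP_OPT_KEY_SPECS)  # passes silently
    try:
        # anything that is not a list of dicts is rejected
        _validate_extra_dhcp_opt({'opt_name': 'x'}, EXTRA_DHCP_OPT_KEY_SPECS)
    except ExtraDhcpOptBadData:
        pass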
# Attribute Map
EXTRADHCPOPTS = 'extra_dhcp_opts'
CLIENT_ID = "client-id"
EXTENDED_ATTRIBUTES_2_0 = {
'ports': {
EXTRADHCPOPTS: {
'allow_post': True,
'allow_put': True,
'is_visible': True,
'default': None,
'validate': {
'type:list_of_extra_dhcp_opts': EXTRA_DHCP_OPT_KEY_SPECS
}
}
}
}
class Extra_dhcp_opt(extensions.ExtensionDescriptor):
@classmethod
def get_name(cls):
return "Neutron Extra DHCP opts"
@classmethod
def get_alias(cls):
return "extra_dhcp_opt"
@classmethod
def get_description(cls):
return ("Extra options configuration for DHCP. "
"For example PXE boot options to DHCP clients can "
"be specified (e.g. tftp-server, server-ip-address, "
"bootfile-name)")
@classmethod
def get_updated(cls):
return "2013-03-17T12:00:00-00:00"
def get_extended_resources(self, version):
if version == "2.0":
return EXTENDED_ATTRIBUTES_2_0
else:
return {}
| apache-2.0 | -9,009,973,161,823,067,000 | -6,774,587,673,502,368,000 | 29.727273 | 75 | 0.62071 | false |
lucaspcamargo/litmus-rt | tools/perf/scripts/python/sched-migration.py | 1910 | 11965 | #!/usr/bin/python
#
# Cpu task migration overview toy
#
# Copyright (C) 2010 Frederic Weisbecker <[email protected]>
#
# perf script event handlers have been generated by perf script -g python
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import os
import sys
from collections import defaultdict
from UserList import UserList
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
sys.path.append('scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from SchedGui import *
threads = { 0 : "idle"}
def thread_name(pid):
return "%s:%d" % (threads[pid], pid)
class RunqueueEventUnknown:
@staticmethod
def color():
return None
def __repr__(self):
return "unknown"
class RunqueueEventSleep:
@staticmethod
def color():
return (0, 0, 0xff)
def __init__(self, sleeper):
self.sleeper = sleeper
def __repr__(self):
return "%s gone to sleep" % thread_name(self.sleeper)
class RunqueueEventWakeup:
@staticmethod
def color():
return (0xff, 0xff, 0)
def __init__(self, wakee):
self.wakee = wakee
def __repr__(self):
return "%s woke up" % thread_name(self.wakee)
class RunqueueEventFork:
@staticmethod
def color():
return (0, 0xff, 0)
def __init__(self, child):
self.child = child
def __repr__(self):
return "new forked task %s" % thread_name(self.child)
class RunqueueMigrateIn:
@staticmethod
def color():
return (0, 0xf0, 0xff)
def __init__(self, new):
self.new = new
def __repr__(self):
return "task migrated in %s" % thread_name(self.new)
class RunqueueMigrateOut:
@staticmethod
def color():
return (0xff, 0, 0xff)
def __init__(self, old):
self.old = old
def __repr__(self):
return "task migrated out %s" % thread_name(self.old)
class RunqueueSnapshot:
def __init__(self, tasks = [0], event = RunqueueEventUnknown()):
self.tasks = tuple(tasks)
self.event = event
def sched_switch(self, prev, prev_state, next):
event = RunqueueEventUnknown()
if taskState(prev_state) == "R" and next in self.tasks \
and prev in self.tasks:
return self
if taskState(prev_state) != "R":
event = RunqueueEventSleep(prev)
next_tasks = list(self.tasks[:])
if prev in self.tasks:
if taskState(prev_state) != "R":
next_tasks.remove(prev)
elif taskState(prev_state) == "R":
next_tasks.append(prev)
if next not in next_tasks:
next_tasks.append(next)
return RunqueueSnapshot(next_tasks, event)
def migrate_out(self, old):
if old not in self.tasks:
return self
next_tasks = [task for task in self.tasks if task != old]
return RunqueueSnapshot(next_tasks, RunqueueMigrateOut(old))
def __migrate_in(self, new, event):
if new in self.tasks:
self.event = event
return self
next_tasks = self.tasks[:] + tuple([new])
return RunqueueSnapshot(next_tasks, event)
def migrate_in(self, new):
return self.__migrate_in(new, RunqueueMigrateIn(new))
def wake_up(self, new):
return self.__migrate_in(new, RunqueueEventWakeup(new))
def wake_up_new(self, new):
return self.__migrate_in(new, RunqueueEventFork(new))
def load(self):
""" Provide the number of tasks on the runqueue.
Don't count idle"""
return len(self.tasks) - 1
def __repr__(self):
ret = self.tasks.__repr__()
ret += self.origin_tostring()
return ret
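# Illustrative sketch (not part of the original perf script): how a snapshot
# evolves as scheduler events are replayed. The pids below are made up.
def _example_runqueue_transitions():
    rq = RunqueueSnapshot()      # starts with only the idle task (pid 0)
    rq = rq.wake_up(1234)        # RunqueueEventWakeup, load() == 1
    rq = rq.migrate_in(5678)     # RunqueueMigrateIn,  load() == 2
    rq = rq.migrate_out(5678)    # RunqueueMigrateOut, load() == 1
    return rq.load()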
class TimeSlice:
def __init__(self, start, prev):
self.start = start
self.prev = prev
self.end = start
# cpus that triggered the event
self.event_cpus = []
if prev is not None:
self.total_load = prev.total_load
self.rqs = prev.rqs.copy()
else:
self.rqs = defaultdict(RunqueueSnapshot)
self.total_load = 0
def __update_total_load(self, old_rq, new_rq):
diff = new_rq.load() - old_rq.load()
self.total_load += diff
def sched_switch(self, ts_list, prev, prev_state, next, cpu):
old_rq = self.prev.rqs[cpu]
new_rq = old_rq.sched_switch(prev, prev_state, next)
if old_rq is new_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def migrate(self, ts_list, new, old_cpu, new_cpu):
if old_cpu == new_cpu:
return
old_rq = self.prev.rqs[old_cpu]
out_rq = old_rq.migrate_out(new)
self.rqs[old_cpu] = out_rq
self.__update_total_load(old_rq, out_rq)
new_rq = self.prev.rqs[new_cpu]
in_rq = new_rq.migrate_in(new)
self.rqs[new_cpu] = in_rq
self.__update_total_load(new_rq, in_rq)
ts_list.append(self)
if old_rq is not out_rq:
self.event_cpus.append(old_cpu)
self.event_cpus.append(new_cpu)
def wake_up(self, ts_list, pid, cpu, fork):
old_rq = self.prev.rqs[cpu]
if fork:
new_rq = old_rq.wake_up_new(pid)
else:
new_rq = old_rq.wake_up(pid)
if new_rq is old_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def next(self, t):
self.end = t
return TimeSlice(t, self)
class TimeSliceList(UserList):
def __init__(self, arg = []):
self.data = arg
def get_time_slice(self, ts):
if len(self.data) == 0:
slice = TimeSlice(ts, TimeSlice(-1, None))
else:
slice = self.data[-1].next(ts)
return slice
def find_time_slice(self, ts):
start = 0
end = len(self.data)
found = -1
searching = True
while searching:
if start == end or start == end - 1:
searching = False
i = (end + start) / 2
if self.data[i].start <= ts and self.data[i].end >= ts:
found = i
end = i
continue
if self.data[i].end < ts:
start = i
elif self.data[i].start > ts:
end = i
return found
def set_root_win(self, win):
self.root_win = win
def mouse_down(self, cpu, t):
idx = self.find_time_slice(t)
if idx == -1:
return
ts = self[idx]
rq = ts.rqs[cpu]
raw = "CPU: %d\n" % cpu
raw += "Last event : %s\n" % rq.event.__repr__()
raw += "Timestamp : %d.%06d\n" % (ts.start / (10 ** 9), (ts.start % (10 ** 9)) / 1000)
raw += "Duration : %6d us\n" % ((ts.end - ts.start) / (10 ** 6))
raw += "Load = %d\n" % rq.load()
for t in rq.tasks:
raw += "%s \n" % thread_name(t)
self.root_win.update_summary(raw)
def update_rectangle_cpu(self, slice, cpu):
rq = slice.rqs[cpu]
if slice.total_load != 0:
load_rate = rq.load() / float(slice.total_load)
else:
load_rate = 0
red_power = int(0xff - (0xff * load_rate))
color = (0xff, red_power, red_power)
top_color = None
if cpu in slice.event_cpus:
top_color = rq.event.color()
self.root_win.paint_rectangle_zone(cpu, color, top_color, slice.start, slice.end)
def fill_zone(self, start, end):
i = self.find_time_slice(start)
if i == -1:
return
for i in xrange(i, len(self.data)):
timeslice = self.data[i]
if timeslice.start > end:
return
for cpu in timeslice.rqs:
self.update_rectangle_cpu(timeslice, cpu)
def interval(self):
if len(self.data) == 0:
return (0, 0)
return (self.data[0].start, self.data[-1].end)
def nr_rectangles(self):
last_ts = self.data[-1]
max_cpu = 0
for cpu in last_ts.rqs:
if cpu > max_cpu:
max_cpu = cpu
return max_cpu
class SchedEventProxy:
def __init__(self):
self.current_tsk = defaultdict(lambda : -1)
self.timeslices = TimeSliceList()
def sched_switch(self, headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
""" Ensure the task we sched out this cpu is really the one
we logged. Otherwise we may have missed traces """
on_cpu_task = self.current_tsk[headers.cpu]
if on_cpu_task != -1 and on_cpu_task != prev_pid:
print "Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \
(headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid)
threads[prev_pid] = prev_comm
threads[next_pid] = next_comm
self.current_tsk[headers.cpu] = next_pid
ts = self.timeslices.get_time_slice(headers.ts())
ts.sched_switch(self.timeslices, prev_pid, prev_state, next_pid, headers.cpu)
def migrate(self, headers, pid, prio, orig_cpu, dest_cpu):
ts = self.timeslices.get_time_slice(headers.ts())
ts.migrate(self.timeslices, pid, orig_cpu, dest_cpu)
def wake_up(self, headers, comm, pid, success, target_cpu, fork):
if success == 0:
return
ts = self.timeslices.get_time_slice(headers.ts())
ts.wake_up(self.timeslices, pid, target_cpu, fork)
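# Rough call flow (an assumption based on standard perf-script conventions):
# `perf script -s sched-migration.py` calls trace_begin() once, then one of the
# sched__sched_*() handlers below per recorded event, and finally trace_end(),
# which builds the wx GUI from the accumulated TimeSliceList.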
def trace_begin():
global parser
parser = SchedEventProxy()
def trace_end():
app = wx.App(False)
timeslices = parser.timeslices
frame = RootFrame(timeslices, "Migration")
app.MainLoop()
def sched__sched_stat_runtime(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, runtime, vruntime):
pass
def sched__sched_stat_iowait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, delay):
pass
def sched__sched_stat_sleep(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, delay):
pass
def sched__sched_stat_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, delay):
pass
def sched__sched_process_fork(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, parent_comm, parent_pid, child_comm, child_pid):
pass
def sched__sched_process_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, prio):
pass
def sched__sched_process_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, prio):
pass
def sched__sched_process_free(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, prio):
pass
def sched__sched_migrate_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, prio, orig_cpu,
dest_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm, common_callchain)
parser.migrate(headers, pid, prio, orig_cpu, dest_cpu)
def sched__sched_switch(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm, common_callchain,
prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm, common_callchain)
parser.sched_switch(headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio)
def sched__sched_wakeup_new(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm, common_callchain)
parser.wake_up(headers, comm, pid, success, target_cpu, 1)
def sched__sched_wakeup(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm, common_callchain)
parser.wake_up(headers, comm, pid, success, target_cpu, 0)
def sched__sched_wait_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid, prio):
pass
def sched__sched_kthread_stop_ret(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, ret):
pass
def sched__sched_kthread_stop(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, comm, pid):
pass
def trace_unhandled(event_name, context, event_fields_dict):
pass
| gpl-2.0 | 600,475,529,066,139,000 | -7,878,877,748,275,862,000 | 25.01087 | 88 | 0.679733 | false |
apache/incubator-allura | ForgeSVN/forgesvn/tests/model/test_repository.py | 1 | 44587 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import shutil
import unittest
import pkg_resources
from itertools import count, product
from datetime import datetime
from zipfile import ZipFile
from collections import defaultdict
from pylons import tmpl_context as c, app_globals as g
import mock
from nose.tools import assert_equal
import tg
import ming
from ming.base import Object
from ming.orm import session, ThreadLocalORMSession
from testfixtures import TempDirectory
from IPython.testing.decorators import onlyif
from alluratest.controller import setup_basic_test, setup_global_objects
from allura import model as M
from allura.model.repo_refresh import send_notifications
from allura.lib import helpers as h
from allura.tests.model.test_repo import RepoImplTestBase
from forgesvn import model as SM
from forgesvn.model.svn import svn_path_exists
from forgesvn.tests import with_svn
from allura.tests.decorators import with_tool
class TestNewRepo(unittest.TestCase):
def setUp(self):
setup_basic_test()
self.setup_with_tools()
@with_svn
def setup_with_tools(self):
setup_global_objects()
h.set_context('test', 'src', neighborhood='Projects')
repo_dir = pkg_resources.resource_filename(
'forgesvn', 'tests/data/')
self.repo = SM.Repository(
name='testsvn',
fs_path=repo_dir,
url_path='/test/',
tool='svn',
status='creating')
self.repo.refresh()
self.rev = self.repo.commit('HEAD')
ThreadLocalORMSession.flush_all()
ThreadLocalORMSession.close_all()
def test_last_commit_for(self):
tree = self.rev.tree
for row in tree.ls():
assert row['last_commit']['author'] is not None
def test_commit(self):
assert self.rev.primary() is self.rev
assert self.rev.index_id().startswith('allura/model/repo/Commit#')
self.rev.author_url
self.rev.committer_url
assert self.rev.tree._id == self.rev.tree_id
assert self.rev.shorthand_id() == '[r6]'
assert self.rev.symbolic_ids == ([], [])
assert self.rev.url() == (
'/p/test/src/6/')
all_cis = list(self.repo.log(self.rev._id))
assert len(all_cis) == 6
self.rev.tree.ls()
assert self.rev.tree.readme() == (
'README', 'This is readme\nAnother Line\n')
assert self.rev.tree.path() == '/'
assert self.rev.tree.url() == (
'/p/test/src/6/tree/')
self.rev.tree.by_name['README']
assert self.rev.tree.is_blob('README') == True
assert self.rev.tree['a']['b']['c'].ls() == []
self.assertRaises(KeyError, lambda: self.rev.tree['a']['b']['d'])
class TestSVNRepo(unittest.TestCase, RepoImplTestBase):
def setUp(self):
setup_basic_test()
self.setup_with_tools()
@with_svn
@with_tool('test', 'SVN', 'svn-tags', 'SVN with tags')
def setup_with_tools(self):
setup_global_objects()
h.set_context('test', 'src', neighborhood='Projects')
repo_dir = pkg_resources.resource_filename(
'forgesvn', 'tests/data/')
self.repo = SM.Repository(
name='testsvn',
fs_path=repo_dir,
url_path='/test/',
tool='svn',
status='creating')
self.repo.refresh()
self.svn_tags = SM.Repository(
name='testsvn-trunk-tags-branches',
fs_path=repo_dir,
url_path='/test/',
tool='svn',
status='creating')
self.svn_tags.refresh()
ThreadLocalORMSession.flush_all()
ThreadLocalORMSession.close_all()
def test_init(self):
repo = SM.Repository(
name='testsvn',
fs_path=g.tmpdir + '/',
url_path='/test/',
tool='svn',
status='creating')
dirname = os.path.join(repo.fs_path, repo.name)
if os.path.exists(dirname):
shutil.rmtree(dirname)
repo.init()
shutil.rmtree(dirname)
def test_fork(self):
repo = SM.Repository(
name='testsvn',
fs_path=g.tmpdir + '/',
url_path='/test/',
tool='svn',
status='creating')
repo_path = pkg_resources.resource_filename(
'forgesvn', 'tests/data/testsvn')
dirname = os.path.join(repo.fs_path, repo.name)
if os.path.exists(dirname):
shutil.rmtree(dirname)
repo.init()
repo._impl.clone_from('file://' + repo_path)
assert not os.path.exists(
os.path.join(g.tmpdir, 'testsvn/hooks/pre-revprop-change'))
assert os.path.exists(
os.path.join(g.tmpdir, 'testsvn/hooks/post-commit'))
assert os.access(
os.path.join(g.tmpdir, 'testsvn/hooks/post-commit'), os.X_OK)
with open(os.path.join(g.tmpdir, 'testsvn/hooks/post-commit')) as f:
hook_data = f.read()
self.assertIn(
'curl -s http://localhost/auth/refresh_repo/p/test/src/\n',
hook_data)
self.assertIn('exec $DIR/post-commit-user "$@"\n', hook_data)
repo.refresh(notify=False)
assert len(list(repo.log()))
shutil.rmtree(dirname)
@mock.patch('forgesvn.model.svn.tg')
def test_can_hotcopy(self, tg):
from forgesvn.model.svn import SVNImplementation
func = SVNImplementation.can_hotcopy
obj = mock.Mock(spec=SVNImplementation)
for combo in product(
['file:///myfile', 'http://myfile'],
[True, False],
['version 1.7', 'version 1.6', 'version 2.0.3']):
source_url = combo[0]
tg.config = {'scm.svn.hotcopy': combo[1]}
stdout = combo[2]
obj.check_call.return_value = stdout, ''
expected = (source_url.startswith('file://') and
tg.config['scm.svn.hotcopy'] and
stdout != 'version 1.6')
result = func(obj, source_url)
assert result == expected
@mock.patch('forgesvn.model.svn.g.post_event')
def test_clone(self, post_event):
repo = SM.Repository(
name='testsvn',
fs_path=g.tmpdir + '/',
url_path='/test/',
tool='svn',
status='creating')
repo_path = pkg_resources.resource_filename(
'forgesvn', 'tests/data/testsvn')
dirname = os.path.join(repo.fs_path, repo.name)
if os.path.exists(dirname):
shutil.rmtree(dirname)
repo.init()
repo._impl.clone_from('file://' + repo_path)
assert not os.path.exists(
os.path.join(g.tmpdir, 'testsvn/hooks/pre-revprop-change'))
assert os.path.exists(
os.path.join(g.tmpdir, 'testsvn/hooks/post-commit'))
assert os.access(
os.path.join(g.tmpdir, 'testsvn/hooks/post-commit'), os.X_OK)
with open(os.path.join(g.tmpdir, 'testsvn/hooks/post-commit')) as f:
c = f.read()
self.assertIn(
'curl -s http://localhost/auth/refresh_repo/p/test/src/\n', c)
self.assertIn('exec $DIR/post-commit-user "$@"\n', c)
repo.refresh(notify=False)
assert len(list(repo.log()))
shutil.rmtree(dirname)
def test_index(self):
i = self.repo.index()
assert i['type_s'] == 'SVN Repository', i
def test_log_id_only(self):
entries = list(self.repo.log(id_only=True))
assert_equal(entries, [6, 5, 4, 3, 2, 1])
def test_log(self):
entries = list(self.repo.log(id_only=False))
assert_equal(entries, [
{'parents': [5],
'refs': ['HEAD'],
'committed': {
'date': datetime(2013, 11, 8, 13, 38, 11, 152821),
'name': u'coldmind', 'email': ''},
'message': u'',
'rename_details': {},
'id': 6,
'authored': {
'date': datetime(2013, 11, 8, 13, 38, 11, 152821),
'name': u'coldmind',
'email': ''
}, 'size': 0},
{'parents': [4],
'refs': [],
'committed': {
'date': datetime(2010, 11, 18, 20, 14, 21, 515743),
'name': u'rick446',
'email': ''},
'message': u'Copied a => b',
'rename_details': {},
'id': 5,
'authored': {
'date': datetime(2010, 11, 18, 20, 14, 21, 515743),
'name': u'rick446',
'email': ''},
'size': 0},
{'parents': [3],
'refs': [],
'committed': {
'date': datetime(2010, 10, 8, 15, 32, 59, 383719),
'name': u'rick446',
'email': ''},
'message': u'Remove hello.txt',
'rename_details': {},
'id': 4,
'authored': {
'date': datetime(2010, 10, 8, 15, 32, 59, 383719),
'name': u'rick446',
'email': ''},
'size': 0},
{'parents': [2],
'refs': [],
'committed': {
'date': datetime(2010, 10, 8, 15, 32, 48, 272296),
'name': u'rick446',
'email': ''},
'message': u'Modify readme',
'rename_details': {},
'id': 3,
'authored':
{'date': datetime(2010, 10, 8, 15, 32, 48, 272296),
'name': u'rick446',
'email': ''},
'size': 0},
{'parents': [1],
'refs': [],
'committed': {
'date': datetime(2010, 10, 8, 15, 32, 36, 221863),
'name': u'rick446',
'email': ''},
'message': u'Add path',
'rename_details': {},
'id': 2,
'authored': {
'date': datetime(2010, 10, 8, 15, 32, 36, 221863),
'name': u'rick446',
'email': ''},
'size': 0},
{'parents': [],
'refs': [],
'committed': {
'date': datetime(2010, 10, 8, 15, 32, 7, 238375),
'name': u'rick446',
'email': ''},
'message': u'Create readme',
'rename_details': {},
'id': 1,
'authored': {
'date': datetime(2010, 10, 8, 15, 32, 7, 238375),
'name': u'rick446',
'email': ''},
'size': 0}])
def test_log_file(self):
entries = list(self.repo.log(path='/README', id_only=False))
assert_equal(entries, [
{'authored': {'date': datetime(2010, 10, 8, 15, 32, 48, 272296),
'email': '',
'name': u'rick446'},
'committed': {'date': datetime(2010, 10, 8, 15, 32, 48, 272296),
'email': '',
'name': u'rick446'},
'id': 3,
'message': u'Modify readme',
'parents': [2],
'refs': [],
'size': 28,
'rename_details': {}},
{'authored': {'date': datetime(2010, 10, 8, 15, 32, 7, 238375),
'email': '',
'name': u'rick446'},
'committed': {'date': datetime(2010, 10, 8, 15, 32, 7, 238375),
'email': '',
'name': u'rick446'},
'id': 1,
'message': u'Create readme',
'parents': [],
'refs': [],
'size': 15,
'rename_details': {}},
])
def test_is_file(self):
assert self.repo.is_file('/README')
assert not self.repo.is_file('/a')
def test_paged_diffs(self):
entry = self.repo.commit(self.repo.log(2, id_only=True).next())
self.assertEqual(entry.diffs, entry.paged_diffs())
self.assertEqual(entry.diffs, entry.paged_diffs(start=0))
added_expected = entry.diffs.added[1:3]
expected = dict(
copied=[], changed=[], removed=[],
added=added_expected, total=4)
actual = entry.paged_diffs(start=1, end=3)
self.assertEqual(expected, actual)
empty = M.repo.Commit().paged_diffs()
self.assertEqual(sorted(actual.keys()), sorted(empty.keys()))
def test_diff_create_file(self):
entry = self.repo.commit(self.repo.log(1, id_only=True).next())
self.assertEqual(
entry.diffs, dict(
copied=[], changed=[],
removed=[], added=['/README'], total=1))
def test_diff_create_path(self):
entry = self.repo.commit(self.repo.log(2, id_only=True).next())
actual = entry.diffs
actual.added = sorted(actual.added)
self.assertEqual(
entry.diffs, dict(
copied=[], changed=[], removed=[],
added=sorted([
'/a', '/a/b', '/a/b/c',
'/a/b/c/hello.txt']), total=4))
def test_diff_modify_file(self):
entry = self.repo.commit(self.repo.log(3, id_only=True).next())
self.assertEqual(
entry.diffs, dict(
copied=[], changed=['/README'],
removed=[], added=[], total=1))
def test_diff_delete(self):
entry = self.repo.commit(self.repo.log(4, id_only=True).next())
self.assertEqual(
entry.diffs, dict(
copied=[], changed=[],
removed=['/a/b/c/hello.txt'], added=[], total=1))
def test_diff_copy(self):
# Copies are currently only detected as 'add'
entry = self.repo.commit(self.repo.log(5, id_only=True).next())
self.assertEqual(
entry.diffs, dict(
copied=[], changed=[],
removed=[], added=['/b'], total=1))
def test_commit(self):
entry = self.repo.commit(1)
assert entry.committed.name == 'rick446'
assert entry.message
def test_svn_path_exists(self):
repo_path = pkg_resources.resource_filename(
'forgesvn', 'tests/data/testsvn')
assert svn_path_exists("file://%s/a" % repo_path)
assert svn_path_exists("file://%s" % repo_path)
assert not svn_path_exists("file://%s/badpath" % repo_path)
with mock.patch('forgesvn.model.svn.pysvn') as pysvn:
svn_path_exists('dummy')
pysvn.Client.return_value.info2.assert_called_once_with(
'dummy',
revision=pysvn.Revision.return_value,
recurse=False)
@onlyif(os.path.exists(tg.config.get('scm.repos.tarball.zip_binary', '/usr/bin/zip')), 'zip binary is missing')
def test_tarball(self):
tmpdir = tg.config['scm.repos.tarball.root']
assert_equal(self.repo.tarball_path,
os.path.join(tmpdir, 'svn/t/te/test/testsvn'))
assert_equal(self.repo.tarball_url('1'),
'file:///svn/t/te/test/testsvn/test-src-1.zip')
self.repo.tarball('1')
assert os.path.isfile(
os.path.join(tmpdir, "svn/t/te/test/testsvn/test-src-1.zip"))
tarball_zip = ZipFile(
os.path.join(tmpdir, 'svn/t/te/test/testsvn/test-src-1.zip'), 'r')
assert_equal(tarball_zip.namelist(),
['test-src-1/', 'test-src-1/README'])
shutil.rmtree(self.repo.tarball_path.encode('utf-8'),
ignore_errors=True)
@onlyif(os.path.exists(tg.config.get('scm.repos.tarball.zip_binary', '/usr/bin/zip')), 'zip binary is missing')
def test_tarball_aware_of_tags(self):
rev = '19'
tag_content = sorted(['test-svn-tags-19-tags-tag-1.0/',
'test-svn-tags-19-tags-tag-1.0/svn-commit.tmp',
'test-svn-tags-19-tags-tag-1.0/README'])
h.set_context('test', 'svn-tags', neighborhood='Projects')
tmpdir = tg.config['scm.repos.tarball.root']
tarball_path = os.path.join(
tmpdir, 'svn/t/te/test/testsvn-trunk-tags-branches/')
fn = tarball_path + 'test-svn-tags-19-tags-tag-1.0.zip'
self.svn_tags.tarball(rev, '/tags/tag-1.0/')
assert os.path.isfile(fn), fn
snapshot = ZipFile(fn, 'r')
assert_equal(sorted(snapshot.namelist()), tag_content)
os.remove(fn)
self.svn_tags.tarball(rev, '/tags/tag-1.0/some/path/')
assert os.path.isfile(fn), fn
snapshot = ZipFile(fn, 'r')
assert_equal(sorted(snapshot.namelist()), tag_content)
os.remove(fn)
# if inside of tags, but no tag is specified
# expect snapshot of trunk
fn = tarball_path + 'test-svn-tags-19-trunk.zip'
self.svn_tags.tarball(rev, '/tags/')
assert os.path.isfile(fn), fn
snapshot = ZipFile(fn, 'r')
assert_equal(sorted(snapshot.namelist()),
sorted(['test-svn-tags-19-trunk/',
'test-svn-tags-19-trunk/aaa.txt',
'test-svn-tags-19-trunk/bbb.txt',
'test-svn-tags-19-trunk/ccc.txt',
'test-svn-tags-19-trunk/README']))
shutil.rmtree(tarball_path, ignore_errors=True)
@onlyif(os.path.exists(tg.config.get('scm.repos.tarball.zip_binary', '/usr/bin/zip')), 'zip binary is missing')
def test_tarball_aware_of_branches(self):
rev = '19'
branch_content = sorted(['test-svn-tags-19-branches-aaa/',
'test-svn-tags-19-branches-aaa/aaa.txt',
'test-svn-tags-19-branches-aaa/svn-commit.tmp',
'test-svn-tags-19-branches-aaa/README'])
h.set_context('test', 'svn-tags', neighborhood='Projects')
tmpdir = tg.config['scm.repos.tarball.root']
tarball_path = os.path.join(
tmpdir, 'svn/t/te/test/testsvn-trunk-tags-branches/')
fn = tarball_path + 'test-svn-tags-19-branches-aaa.zip'
self.svn_tags.tarball(rev, '/branches/aaa/')
assert os.path.isfile(fn), fn
snapshot = ZipFile(fn, 'r')
assert_equal(sorted(snapshot.namelist()), branch_content)
os.remove(fn)
self.svn_tags.tarball(rev, '/branches/aaa/some/path/')
assert os.path.isfile(fn), fn
snapshot = ZipFile(fn, 'r')
assert_equal(sorted(snapshot.namelist()), branch_content)
os.remove(fn)
# if inside of branches, but no branch is specified
# expect snapshot of trunk
fn = tarball_path + 'test-svn-tags-19-trunk.zip'
self.svn_tags.tarball(rev, '/branches/')
assert os.path.isfile(fn), fn
snapshot = ZipFile(fn, 'r')
assert_equal(sorted(snapshot.namelist()),
sorted(['test-svn-tags-19-trunk/',
'test-svn-tags-19-trunk/aaa.txt',
'test-svn-tags-19-trunk/bbb.txt',
'test-svn-tags-19-trunk/ccc.txt',
'test-svn-tags-19-trunk/README']))
shutil.rmtree(tarball_path, ignore_errors=True)
@onlyif(os.path.exists(tg.config.get('scm.repos.tarball.zip_binary', '/usr/bin/zip')), 'zip binary is missing')
def test_tarball_aware_of_trunk(self):
rev = '19'
trunk_content = sorted(['test-svn-tags-19-trunk/',
'test-svn-tags-19-trunk/aaa.txt',
'test-svn-tags-19-trunk/bbb.txt',
'test-svn-tags-19-trunk/ccc.txt',
'test-svn-tags-19-trunk/README'])
h.set_context('test', 'svn-tags', neighborhood='Projects')
tmpdir = tg.config['scm.repos.tarball.root']
tarball_path = os.path.join(
tmpdir, 'svn/t/te/test/testsvn-trunk-tags-branches/')
fn = tarball_path + 'test-svn-tags-19-trunk.zip'
self.svn_tags.tarball(rev, '/trunk/')
assert os.path.isfile(fn), fn
snapshot = ZipFile(fn, 'r')
assert_equal(sorted(snapshot.namelist()), trunk_content)
os.remove(fn)
self.svn_tags.tarball(rev, '/trunk/some/path/')
assert os.path.isfile(fn), fn
snapshot = ZipFile(fn, 'r')
assert_equal(sorted(snapshot.namelist()), trunk_content)
os.remove(fn)
        # no path, but there is a trunk dir in the repo
# expect snapshot of trunk
self.svn_tags.tarball(rev)
assert os.path.isfile(fn), fn
snapshot = ZipFile(fn, 'r')
assert_equal(sorted(snapshot.namelist()), trunk_content)
os.remove(fn)
# no path, and no trunk dir
# expect snapshot of repo root
h.set_context('test', 'src', neighborhood='Projects')
fn = os.path.join(tmpdir, 'svn/t/te/test/testsvn/test-src-1.zip')
self.repo.tarball('1')
assert os.path.isfile(fn), fn
snapshot = ZipFile(fn, 'r')
assert_equal(snapshot.namelist(), ['test-src-1/', 'test-src-1/README'])
shutil.rmtree(os.path.join(tmpdir, 'svn/t/te/test/testsvn/'),
ignore_errors=True)
shutil.rmtree(tarball_path, ignore_errors=True)
def test_is_empty(self):
assert not self.repo.is_empty()
with TempDirectory() as d:
repo2 = SM.Repository(
name='test',
fs_path=d.path,
url_path='/test/',
tool='svn',
status='creating')
repo2.init()
assert repo2.is_empty()
repo2.refresh()
ThreadLocalORMSession.flush_all()
assert repo2.is_empty()
class TestSVNRev(unittest.TestCase):
def setUp(self):
setup_basic_test()
self.setup_with_tools()
@with_svn
def setup_with_tools(self):
setup_global_objects()
h.set_context('test', 'src', neighborhood='Projects')
repo_dir = pkg_resources.resource_filename(
'forgesvn', 'tests/data/')
self.repo = SM.Repository(
name='testsvn',
fs_path=repo_dir,
url_path='/test/',
tool='svn',
status='creating')
self.repo.refresh()
self.rev = self.repo.commit(1)
ThreadLocalORMSession.flush_all()
ThreadLocalORMSession.close_all()
def test_url(self):
assert self.rev.url().endswith('/1/')
def test_primary(self):
assert self.rev.primary() == self.rev
def test_shorthand(self):
assert self.rev.shorthand_id() == '[r1]'
def test_diff(self):
diffs = (self.rev.diffs.added
+ self.rev.diffs.removed
+ self.rev.diffs.changed
+ self.rev.diffs.copied)
for d in diffs:
print d
def _oid(self, rev_id):
return '%s:%s' % (self.repo._id, rev_id)
def test_log(self):
# path only
commits = list(self.repo.log(self.repo.head, id_only=True))
assert_equal(commits, [6, 5, 4, 3, 2, 1])
commits = list(self.repo.log(self.repo.head, 'README', id_only=True))
assert_equal(commits, [3, 1])
commits = list(self.repo.log(1, 'README', id_only=True))
assert_equal(commits, [1])
commits = list(self.repo.log(self.repo.head, 'a/b/c/', id_only=True))
assert_equal(commits, [4, 2])
commits = list(self.repo.log(3, 'a/b/c/', id_only=True))
assert_equal(commits, [2])
assert_equal(
list(self.repo.log(self.repo.head, 'does/not/exist', id_only=True)), [])
def test_notification_email(self):
setup_global_objects()
h.set_context('test', 'src', neighborhood='Projects')
repo_dir = pkg_resources.resource_filename(
'forgesvn', 'tests/data/')
self.repo = SM.Repository(
name='testsvn',
fs_path=repo_dir,
url_path='/test/',
tool='svn',
status='creating')
self.repo.refresh()
ThreadLocalORMSession.flush_all()
send_notifications(self.repo, [self.repo.rev_to_commit_id(1)])
ThreadLocalORMSession.flush_all()
n = M.Notification.query.find(
dict(subject='[test:src] [r1] - rick446: Create readme')).first()
assert n
assert_equal(n.text, 'Create readme http://localhost/p/test/src/1/')
class _Test(unittest.TestCase):
idgen = ('obj_%d' % i for i in count())
def _make_tree(self, object_id, **kwargs):
t, isnew = M.repo.Tree.upsert(object_id)
repo = getattr(self, 'repo', None)
t.repo = repo
for k, v in kwargs.iteritems():
if isinstance(v, basestring):
obj = M.repo.Blob(
t, k, self.idgen.next())
t.blob_ids.append(Object(
name=k, id=obj._id))
else:
obj = self._make_tree(self.idgen.next(), **v)
t.tree_ids.append(Object(
name=k, id=obj._id))
session(t).flush()
return t
def _make_commit(self, object_id, **tree_parts):
ci, isnew = M.repo.Commit.upsert(object_id)
if isnew:
ci.committed.email = c.user.email_addresses[0]
ci.authored.email = c.user.email_addresses[0]
dt = datetime.utcnow()
# BSON datetime resolution is to 1 millisecond, not 1 microsecond
# like Python. Round this now so it'll match the value that's
# pulled from MongoDB in the tests.
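            # e.g. microsecond=123456 becomes 123000 after the floor division below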
ci.authored.date = dt.replace(
microsecond=dt.microsecond / 1000 * 1000)
            ci.message = 'summary\n\ndescription'
ci.set_context(self.repo)
ci.tree_id = 't_' + object_id
ci.tree = self._make_tree(ci.tree_id, **tree_parts)
return ci, isnew
def _make_log(self, ci):
session(ci).flush(ci)
rb = M.repo_refresh.CommitRunBuilder([ci._id])
rb.run()
rb.cleanup()
def setUp(self):
setup_basic_test()
setup_global_objects()
ThreadLocalORMSession.flush_all()
ThreadLocalORMSession.close_all()
self.prefix = tg.config.get('scm.repos.root', '/')
class _TestWithRepo(_Test):
def setUp(self):
super(_TestWithRepo, self).setUp()
h.set_context('test', neighborhood='Projects')
c.project.install_app('svn', 'test1')
h.set_context('test', 'test1', neighborhood='Projects')
self.repo = M.Repository(name='test1', tool='svn')
self.repo._impl = mock.Mock(spec=M.RepositoryImplementation())
self.repo._impl.shorthand_for_commit = M.RepositoryImplementation.shorthand_for_commit
self.repo._impl.url_for_commit = (
lambda *a, **kw: M.RepositoryImplementation.url_for_commit(
self.repo._impl, *a, **kw))
self.repo._impl._repo = self.repo
self.repo._impl.all_commit_ids = lambda *a, **kw: []
self.repo._impl.commit().symbolic_ids = None
ThreadLocalORMSession.flush_all()
# ThreadLocalORMSession.close_all()
class _TestWithRepoAndCommit(_TestWithRepo):
def setUp(self):
super(_TestWithRepoAndCommit, self).setUp()
self.ci, isnew = self._make_commit('foo')
ThreadLocalORMSession.flush_all()
# ThreadLocalORMSession.close_all()
class TestRepo(_TestWithRepo):
def test_create(self):
assert self.repo.fs_path == os.path.join(self.prefix, 'svn/p/test/')
assert self.repo.url_path == '/p/test/'
assert self.repo.full_fs_path == os.path.join(
self.prefix, 'svn/p/test/test1')
def test_passthrough(self):
argless = ['init']
for fn in argless:
getattr(self.repo, fn)()
getattr(self.repo._impl, fn).assert_called_with()
unary = ['commit', 'open_blob']
for fn in unary:
getattr(self.repo, fn)('foo')
getattr(self.repo._impl, fn).assert_called_with('foo')
def test_shorthand_for_commit(self):
self.assertEqual(
self.repo.shorthand_for_commit('a' * 40),
'[aaaaaa]')
def test_url_for_commit(self):
self.assertEqual(
self.repo.url_for_commit('a' * 40),
'/p/test/test1/ci/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/')
@mock.patch('allura.model.repository.g.post_event')
def test_init_as_clone(self, post_event):
self.repo.init_as_clone('srcpath', 'srcname', 'srcurl')
assert self.repo.upstream_repo.name == 'srcname'
assert self.repo.upstream_repo.url == 'srcurl'
assert self.repo._impl.clone_from.called_with('srcpath')
post_event.assert_called_once_with('repo_cloned', 'srcurl', 'srcpath')
def test_latest(self):
ci = mock.Mock()
self.repo._impl.commit = mock.Mock(return_value=ci)
assert self.repo.latest() is ci
def test_index(self):
i = self.repo.index()
assert i['type_s'] == 'Repository', i
assert i['name_s'] == 'test1', i
def test_scm_host_url(self):
assert (
self.repo.clone_url('rw', 'nobody')
== 'svn+ssh://nobody@localhost:8022/scm-repo/p/test/test1/'),\
self.repo.clone_url('rw', 'nobody')
assert (
self.repo.clone_url('https', 'nobody')
== 'https://nobody@localhost:8022/scm-repo/p/test/test1/'),\
self.repo.clone_url('https', 'nobody')
def test_merge_request(self):
M.MergeRequest.upsert(app_config_id=c.app.config._id, status='open')
M.MergeRequest.upsert(app_config_id=c.app.config._id, status='closed')
session(M.MergeRequest).flush()
session(M.MergeRequest).clear()
assert self.repo.merge_requests_by_statuses('open').count() == 1
assert self.repo.merge_requests_by_statuses('closed').count() == 1
assert self.repo.merge_requests_by_statuses(
'open', 'closed').count() == 2
def test_guess_type(self):
assert self.repo.guess_type('foo.txt') == ('text/plain', None)
assert self.repo.guess_type('foo.gbaer') == (
'application/octet-stream', None)
assert self.repo.guess_type('foo.html') == ('text/html', None)
assert self.repo.guess_type('.gitignore') == ('text/plain', None)
def test_refresh(self):
committer_name = 'Test Committer'
committer_email = '[email protected]'
ci = mock.Mock()
ci.authored.name = committer_name
ci.committed.name = committer_name
ci.committed.email = committer_email
ci.author_url = '/u/test-committer/'
ci.activity_name = '[deadbeef]'
ci.activity_url = 'url'
ci.activity_extras = {}
del ci.node_id
self.repo._impl.commit = mock.Mock(return_value=ci)
self.repo._impl.new_commits = mock.Mock(
return_value=['foo%d' % i for i in range(100)])
self.repo._impl.all_commit_ids = mock.Mock(
return_value=['foo%d' % i for i in range(100)])
self.repo.symbolics_for_commit = mock.Mock(
return_value=[['master', 'branch'], []])
def refresh_commit_info(oid, seen, lazy=False):
M.repo.CommitDoc(dict(
authored=dict(
name=committer_name,
email=committer_email),
_id=oid)).m.insert()
self.repo._impl.refresh_commit_info = refresh_commit_info
_id = lambda oid: getattr(oid, '_id', str(oid))
self.repo.shorthand_for_commit = lambda oid: '[' + _id(oid) + ']'
self.repo.url_for_commit = lambda oid: '/ci/' + _id(oid) + '/'
self.repo.refresh()
ThreadLocalORMSession.flush_all()
notifications = M.Notification.query.find().all()
for n in notifications:
if '100 new commits' in n.subject:
assert "master,branch: by %s http://localhost/ci/foo99" % committer_name in n.text
break
else:
assert False, 'Did not find notification'
assert M.Feed.query.find(dict(
author_name=committer_name)).count() == 100
def test_refresh_private(self):
ci = mock.Mock()
self.repo._impl.commit = mock.Mock(return_value=ci)
self.repo._impl.new_commits = mock.Mock(
return_value=['foo%d' % i for i in range(100)])
# make unreadable by *anonymous, so additional notification logic
# executes
self.repo.acl = []
c.project.acl = []
self.repo.refresh()
def test_push_upstream_context(self):
self.repo.init_as_clone('srcpath', '/p/test/svn/', '/p/test/svn/')
old_app_instance = M.Project.app_instance
try:
M.Project.app_instance = mock.Mock(return_value=ming.base.Object(
config=ming.base.Object(_id=None)))
with self.repo.push_upstream_context():
assert c.project.shortname == 'test'
finally:
M.Project.app_instance = old_app_instance
def test_pending_upstream_merges(self):
self.repo.init_as_clone('srcpath', '/p/test/svn/', '/p/test/svn/')
old_app_instance = M.Project.app_instance
try:
M.Project.app_instance = mock.Mock(return_value=ming.base.Object(
config=ming.base.Object(_id=None)))
self.repo.pending_upstream_merges()
finally:
M.Project.app_instance = old_app_instance
class TestMergeRequest(_TestWithRepoAndCommit):
def setUp(self):
super(TestMergeRequest, self).setUp()
c.project.install_app('svn', 'test2')
h.set_context('test', 'test2', neighborhood='Projects')
self.repo2 = M.Repository(name='test2', tool='svn')
self.repo2._impl = mock.Mock(spec=M.RepositoryImplementation())
self.repo2._impl.log = lambda *a, **kw: (['foo'], [])
self.repo2._impl.all_commit_ids = lambda *a, **kw: []
self.repo2._impl._repo = self.repo2
self.repo2.init_as_clone('/p/test/', 'test1', '/p/test/test1/')
ThreadLocalORMSession.flush_all()
ThreadLocalORMSession.close_all()
def test_upsert(self):
h.set_context('test', 'test1', neighborhood='Projects')
mr = M.MergeRequest.upsert(
downstream=ming.base.Object(
project_id=c.project._id,
mount_point='test2',
commit_id='foo:2'),
target_branch='foobranch',
summary='summary',
description='description')
u = M.User.by_username('test-admin')
assert_equal(mr.creator, u)
assert_equal(mr.creator_name, u.get_pref('display_name'))
assert_equal(mr.creator_url, u.url())
assert_equal(mr.downstream_url, '/p/test/test2/')
assert_equal(mr.downstream_repo_url,
'http://svn.localhost/p/test/test2/')
with mock.patch('forgesvn.model.svn.SVNLibWrapper') as _svn,\
mock.patch('forgesvn.model.svn.SVNImplementation._map_log') as _map_log:
mr.app.repo._impl.head = 1
_svn().log.return_value = [mock.Mock(revision=mock.Mock(number=2))]
_map_log.return_value = 'bar'
assert_equal(mr.commits, ['bar'])
# can't do assert_called_once_with because pysvn.Revision doesn't
# compare nicely
assert_equal(_svn().log.call_count, 1)
assert_equal(_svn().log.call_args[0],
('file:///tmp/svn/p/test/test2',))
assert_equal(_svn().log.call_args[1]['revision_start'].number, 2)
assert_equal(_svn().log.call_args[1]['limit'], 25)
_map_log.assert_called_once_with(
_svn().log.return_value[0], 'file:///tmp/svn/p/test/test2', None)
class TestRepoObject(_TestWithRepoAndCommit):
def test_upsert(self):
obj0, isnew0 = M.repo.Tree.upsert('foo1')
obj1, isnew1 = M.repo.Tree.upsert('foo1')
assert obj0 is obj1
assert isnew0 and not isnew1
def test_artifact_methods(self):
assert self.ci.index_id(
) == 'allura/model/repo/Commit#foo', self.ci.index_id()
assert self.ci.primary() is self.ci, self.ci.primary()
class TestCommit(_TestWithRepo):
def setUp(self):
super(TestCommit, self).setUp()
self.ci, isnew = self._make_commit(
'foo',
a=dict(
a=dict(
a='',
b='',),
b=''))
self.tree = self.ci.tree
impl = M.RepositoryImplementation()
impl._repo = self.repo
self.repo._impl.shorthand_for_commit = impl.shorthand_for_commit
self.repo._impl.url_for_commit = impl.url_for_commit
def test_upsert(self):
obj0, isnew0 = M.repo.Commit.upsert('foo')
obj1, isnew1 = M.repo.Commit.upsert('foo')
assert obj0 is obj1
assert not isnew1
u = M.User.by_username('test-admin')
assert self.ci.author_url == u.url()
assert self.ci.committer_url == u.url()
assert self.ci.tree is self.tree
assert self.ci.summary == 'summary'
assert self.ci.shorthand_id() == '[foo]'
assert self.ci.url() == '/p/test/test1/ci/foo/'
def test_get_path(self):
b = self.ci.get_path('a/a/a')
assert isinstance(b, M.repo.Blob)
x = self.ci.get_path('a/a')
assert isinstance(x, M.repo.Tree)
def _unique_blobs(self):
def counter():
counter.i += 1
return counter.i
counter.i = 0
blobs = defaultdict(counter)
from cStringIO import StringIO
return lambda blob: StringIO(str(blobs[blob.path()]))
def test_compute_diffs(self):
self.repo._impl.commit = mock.Mock(return_value=self.ci)
self.repo._impl.open_blob = self._unique_blobs()
M.repo_refresh.refresh_commit_trees(self.ci, {})
M.repo_refresh.compute_diffs(self.repo._id, {}, self.ci)
# self.ci.compute_diffs()
assert_equal(self.ci.diffs.added,
['a', 'a/a', 'a/a/a', 'a/a/b', 'a/b'])
assert (self.ci.diffs.copied
== self.ci.diffs.changed
== self.ci.diffs.removed
== [])
ci, isnew = self._make_commit('bar')
ci.parent_ids = ['foo']
self._make_log(ci)
M.repo_refresh.refresh_commit_trees(ci, {})
M.repo_refresh.compute_diffs(self.repo._id, {}, ci)
assert_equal(ci.diffs.removed, ['a', 'a/a', 'a/a/a', 'a/a/b', 'a/b'])
assert (ci.diffs.copied
== ci.diffs.changed
== ci.diffs.added
== [])
ci, isnew = self._make_commit(
'baz',
b=dict(
a=dict(
a='',
b='',),
b=''))
ci.parent_ids = ['foo']
self._make_log(ci)
M.repo_refresh.refresh_commit_trees(ci, {})
M.repo_refresh.compute_diffs(self.repo._id, {}, ci)
assert_equal(ci.diffs.added, ['b', 'b/a', 'b/a/a', 'b/a/b', 'b/b'])
assert_equal(ci.diffs.removed, ['a', 'a/a', 'a/a/a', 'a/a/b', 'a/b'])
assert (ci.diffs.copied
== ci.diffs.changed
== [])
def test_diffs_file_renames(self):
def open_blob(blob):
blobs = {
u'a': u'Leia',
u'/b/a/a': u'Darth Vader',
u'/b/a/b': u'Luke Skywalker',
u'/b/b': u'Death Star will destroy you',
u'/b/c': u'Luke Skywalker', # moved from /b/a/b
# moved from /b/b and modified
u'/b/a/z': u'Death Star will destroy you\nALL',
}
from cStringIO import StringIO
return StringIO(blobs.get(blob.path(), ''))
self.repo._impl.open_blob = open_blob
self.repo._impl.commit = mock.Mock(return_value=self.ci)
M.repo_refresh.refresh_commit_trees(self.ci, {})
M.repo_refresh.compute_diffs(self.repo._id, {}, self.ci)
assert_equal(self.ci.diffs.added,
['a', 'a/a', 'a/a/a', 'a/a/b', 'a/b'])
assert (self.ci.diffs.copied
== self.ci.diffs.changed
== self.ci.diffs.removed
== [])
ci, isnew = self._make_commit(
'bar',
b=dict(
a=dict(
a='',
b='',),
b=''))
ci.parent_ids = ['foo']
self._make_log(ci)
M.repo_refresh.refresh_commit_trees(ci, {})
M.repo_refresh.compute_diffs(self.repo._id, {}, ci)
assert_equal(ci.diffs.added, ['b', 'b/a', 'b/a/a', 'b/a/b', 'b/b'])
assert_equal(ci.diffs.removed, ['a', 'a/a', 'a/a/a', 'a/a/b', 'a/b'])
assert (ci.diffs.copied
== ci.diffs.changed
== [])
ci, isnew = self._make_commit(
'baz',
b=dict(
a=dict(
z=''),
c=''))
ci.parent_ids = ['bar']
self._make_log(ci)
M.repo_refresh.refresh_commit_trees(ci, {})
M.repo_refresh.compute_diffs(self.repo._id, {}, ci)
assert_equal(ci.diffs.added, [])
assert_equal(ci.diffs.changed, [])
assert_equal(ci.diffs.removed, ['b/a/a'])
# see mock for open_blob
assert_equal(len(ci.diffs.copied), 2)
assert_equal(ci.diffs.copied[0]['old'], 'b/a/b')
assert_equal(ci.diffs.copied[0]['new'], 'b/c')
assert_equal(ci.diffs.copied[0]['ratio'], 1)
assert_equal(ci.diffs.copied[0]['diff'], '')
assert_equal(ci.diffs.copied[1]['old'], 'b/b')
assert_equal(ci.diffs.copied[1]['new'], 'b/a/z')
assert ci.diffs.copied[1]['ratio'] < 1, ci.diffs.copied[1]['ratio']
assert '+++' in ci.diffs.copied[1]['diff'], ci.diffs.copied[1]['diff']
def test_context(self):
self.ci.context()
class TestRename(unittest.TestCase):
def setUp(self):
setup_basic_test()
self.setup_with_tools()
@with_svn
def setup_with_tools(self):
setup_global_objects()
h.set_context('test', 'src', neighborhood='Projects')
repo_dir = pkg_resources.resource_filename(
'forgesvn', 'tests/data/')
self.repo = SM.Repository(
name='testsvn-rename',
fs_path=repo_dir,
url_path='/test/',
tool='svn',
status='creating')
self.repo.refresh()
self.rev = self.repo.commit('HEAD')
ThreadLocalORMSession.flush_all()
ThreadLocalORMSession.close_all()
def test_log_file_with_rename(self):
entry = list(self.repo.log(path='/dir/b.txt', id_only=False))[0]
assert_equal(entry['id'], 3)
assert_equal(entry['rename_details']['path'], '/dir/a.txt')
assert_equal(
entry['rename_details']['commit_url'],
self.repo.url_for_commit(2) # previous revision
)
def test_check_changed_path(self):
changed_path = {'copyfrom_path': '/test/path', 'path': '/test/path2'}
result = self.repo._impl._check_changed_path(
changed_path, '/test/path2/file.txt')
assert_equal({'path': '/test/path2/file.txt',
'copyfrom_path': '/test/path/file.txt'}, result)
| apache-2.0 | -6,919,306,045,110,229,000 | -1,362,389,010,200,580,900 | 38.24912 | 115 | 0.538722 | false |
benjaminjkraft/django | django/core/management/commands/makemigrations.py | 10 | 13506 | import os
import sys
import warnings
from itertools import takewhile
from django.apps import apps
from django.core.management.base import BaseCommand, CommandError
from django.db.migrations import Migration
from django.db.migrations.autodetector import MigrationAutodetector
from django.db.migrations.loader import MigrationLoader
from django.db.migrations.questioner import (
InteractiveMigrationQuestioner, MigrationQuestioner,
NonInteractiveMigrationQuestioner,
)
from django.db.migrations.state import ProjectState
from django.db.migrations.writer import MigrationWriter
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.six import iteritems
from django.utils.six.moves import zip
class Command(BaseCommand):
help = "Creates new migration(s) for apps."
def add_arguments(self, parser):
parser.add_argument('args', metavar='app_label', nargs='*',
help='Specify the app label(s) to create migrations for.')
parser.add_argument('--dry-run', action='store_true', dest='dry_run', default=False,
help="Just show what migrations would be made; don't actually write them.")
parser.add_argument('--merge', action='store_true', dest='merge', default=False,
help="Enable fixing of migration conflicts.")
parser.add_argument('--empty', action='store_true', dest='empty', default=False,
help="Create an empty migration.")
parser.add_argument('--noinput', '--no-input',
action='store_false', dest='interactive', default=True,
help='Tells Django to NOT prompt the user for input of any kind.')
parser.add_argument('-n', '--name', action='store', dest='name', default=None,
help="Use this name for migration file(s).")
parser.add_argument('-e', '--exit', action='store_true', dest='exit_code', default=False,
help='Exit with error code 1 if no changes needing migrations are found. '
'Deprecated, use the --check option instead.')
parser.add_argument('--check', action='store_true', dest='check_changes',
help='Exit with a non-zero status if model changes are missing migrations.')
def handle(self, *app_labels, **options):
self.verbosity = options.get('verbosity')
self.interactive = options.get('interactive')
self.dry_run = options.get('dry_run', False)
self.merge = options.get('merge', False)
self.empty = options.get('empty', False)
self.migration_name = options.get('name')
self.exit_code = options.get('exit_code', False)
check_changes = options['check_changes']
if self.exit_code:
warnings.warn(
"The --exit option is deprecated in favor of the --check option.",
RemovedInDjango20Warning
)
# Make sure the app they asked for exists
app_labels = set(app_labels)
bad_app_labels = set()
for app_label in app_labels:
try:
apps.get_app_config(app_label)
except LookupError:
bad_app_labels.add(app_label)
if bad_app_labels:
for app_label in bad_app_labels:
self.stderr.write("App '%s' could not be found. Is it in INSTALLED_APPS?" % app_label)
sys.exit(2)
# Load the current graph state. Pass in None for the connection so
# the loader doesn't try to resolve replaced migrations from DB.
loader = MigrationLoader(None, ignore_no_migrations=True)
        # Before anything else, see if there are conflicting apps and drop out
        # hard if there are any and they don't want to merge
conflicts = loader.detect_conflicts()
# If app_labels is specified, filter out conflicting migrations for unspecified apps
if app_labels:
conflicts = {
app_label: conflict for app_label, conflict in iteritems(conflicts)
if app_label in app_labels
}
if conflicts and not self.merge:
name_str = "; ".join(
"%s in %s" % (", ".join(names), app)
for app, names in conflicts.items()
)
raise CommandError(
"Conflicting migrations detected; multiple leaf nodes in the "
"migration graph: (%s).\nTo fix them run "
"'python manage.py makemigrations --merge'" % name_str
)
# If they want to merge and there's nothing to merge, then politely exit
if self.merge and not conflicts:
self.stdout.write("No conflicts detected to merge.")
return
# If they want to merge and there is something to merge, then
# divert into the merge code
if self.merge and conflicts:
return self.handle_merge(loader, conflicts)
if self.interactive:
questioner = InteractiveMigrationQuestioner(specified_apps=app_labels, dry_run=self.dry_run)
else:
questioner = NonInteractiveMigrationQuestioner(specified_apps=app_labels, dry_run=self.dry_run)
# Set up autodetector
autodetector = MigrationAutodetector(
loader.project_state(),
ProjectState.from_apps(apps),
questioner,
)
# If they want to make an empty migration, make one for each app
if self.empty:
if not app_labels:
raise CommandError("You must supply at least one app label when using --empty.")
# Make a fake changes() result we can pass to arrange_for_graph
changes = {
app: [Migration("custom", app)]
for app in app_labels
}
changes = autodetector.arrange_for_graph(
changes=changes,
graph=loader.graph,
migration_name=self.migration_name,
)
self.write_migration_files(changes)
return
# Detect changes
changes = autodetector.changes(
graph=loader.graph,
trim_to_apps=app_labels or None,
convert_apps=app_labels or None,
migration_name=self.migration_name,
)
if not changes:
# No changes? Tell them.
if self.verbosity >= 1:
if len(app_labels) == 1:
self.stdout.write("No changes detected in app '%s'" % app_labels.pop())
elif len(app_labels) > 1:
self.stdout.write("No changes detected in apps '%s'" % ("', '".join(app_labels)))
else:
self.stdout.write("No changes detected")
if self.exit_code:
sys.exit(1)
else:
self.write_migration_files(changes)
if check_changes:
sys.exit(1)
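    # Illustrative command-line usage of the options handled above (assumed,
    # typical invocations; not output produced by this module):
    #   python manage.py makemigrations                     # all apps
    #   python manage.py makemigrations polls -n add_votes  # one app, custom name
    #   python manage.py makemigrations --merge             # resolve conflicting leaf migrations
    #   python manage.py makemigrations --check --dry-run   # exit non-zero if migrations are missing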
def write_migration_files(self, changes):
"""
Takes a changes dict and writes them out as migration files.
"""
directory_created = {}
for app_label, app_migrations in changes.items():
if self.verbosity >= 1:
self.stdout.write(self.style.MIGRATE_HEADING("Migrations for '%s':" % app_label) + "\n")
for migration in app_migrations:
# Describe the migration
writer = MigrationWriter(migration)
if self.verbosity >= 1:
# Display a relative path if it's below the current working
# directory, or an absolute path otherwise.
migration_string = os.path.relpath(writer.path)
if migration_string.startswith('..'):
migration_string = writer.path
self.stdout.write(" %s:\n" % (self.style.MIGRATE_LABEL(migration_string),))
for operation in migration.operations:
self.stdout.write(" - %s\n" % operation.describe())
if not self.dry_run:
# Write the migrations file to the disk.
migrations_directory = os.path.dirname(writer.path)
if not directory_created.get(app_label):
if not os.path.isdir(migrations_directory):
os.mkdir(migrations_directory)
init_path = os.path.join(migrations_directory, "__init__.py")
if not os.path.isfile(init_path):
open(init_path, "w").close()
# We just do this once per app
directory_created[app_label] = True
migration_string = writer.as_string()
with open(writer.path, "wb") as fh:
fh.write(migration_string)
elif self.verbosity == 3:
# Alternatively, makemigrations --dry-run --verbosity 3
# will output the migrations to stdout rather than saving
# the file to the disk.
self.stdout.write(self.style.MIGRATE_HEADING(
"Full migrations file '%s':" % writer.filename) + "\n"
)
self.stdout.write("%s\n" % writer.as_string())
def handle_merge(self, loader, conflicts):
"""
Handles merging together conflicted migrations interactively,
if it's safe; otherwise, advises on how to fix it.
"""
if self.interactive:
questioner = InteractiveMigrationQuestioner()
else:
questioner = MigrationQuestioner(defaults={'ask_merge': True})
for app_label, migration_names in conflicts.items():
# Grab out the migrations in question, and work out their
# common ancestor.
merge_migrations = []
for migration_name in migration_names:
migration = loader.get_migration(app_label, migration_name)
migration.ancestry = [
mig for mig in loader.graph.forwards_plan((app_label, migration_name))
if mig[0] == migration.app_label
]
merge_migrations.append(migration)
all_items_equal = lambda seq: all(item == seq[0] for item in seq[1:])
merge_migrations_generations = zip(*[m.ancestry for m in merge_migrations])
common_ancestor_count = sum(1 for common_ancestor_generation
in takewhile(all_items_equal, merge_migrations_generations))
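            # Example: if the two conflicting leaves have ancestries
            # [0001_initial, 0002_a, 0003_a] and [0001_initial, 0002_a, 0003_b],
            # zip() pairs them generation by generation and takewhile() stops
            # at the first unequal pair, giving a common ancestor count of 2.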
if not common_ancestor_count:
raise ValueError("Could not find common ancestor of %s" % migration_names)
# Now work out the operations along each divergent branch
for migration in merge_migrations:
migration.branch = migration.ancestry[common_ancestor_count:]
migrations_ops = (loader.get_migration(node_app, node_name).operations
for node_app, node_name in migration.branch)
migration.merged_operations = sum(migrations_ops, [])
# In future, this could use some of the Optimizer code
# (can_optimize_through) to automatically see if they're
# mergeable. For now, we always just prompt the user.
if self.verbosity > 0:
self.stdout.write(self.style.MIGRATE_HEADING("Merging %s" % app_label))
for migration in merge_migrations:
self.stdout.write(self.style.MIGRATE_LABEL(" Branch %s" % migration.name))
for operation in migration.merged_operations:
self.stdout.write(" - %s\n" % operation.describe())
if questioner.ask_merge(app_label):
# If they still want to merge it, then write out an empty
# file depending on the migrations needing merging.
numbers = [
MigrationAutodetector.parse_number(migration.name)
for migration in merge_migrations
]
try:
biggest_number = max(x for x in numbers if x is not None)
except ValueError:
biggest_number = 1
subclass = type("Migration", (Migration, ), {
"dependencies": [(app_label, migration.name) for migration in merge_migrations],
})
new_migration = subclass("%04i_merge" % (biggest_number + 1), app_label)
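                # For example, merging the leaves 0003_auto and 0003_other
                # produces an (empty) 0004_merge migration that lists both of
                # them as dependencies.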
writer = MigrationWriter(new_migration)
if not self.dry_run:
# Write the merge migrations file to the disk
with open(writer.path, "wb") as fh:
fh.write(writer.as_string())
if self.verbosity > 0:
self.stdout.write("\nCreated new merge migration %s" % writer.path)
elif self.verbosity == 3:
# Alternatively, makemigrations --merge --dry-run --verbosity 3
# will output the merge migrations to stdout rather than saving
# the file to the disk.
self.stdout.write(self.style.MIGRATE_HEADING(
"Full merge migrations file '%s':" % writer.filename) + "\n"
)
self.stdout.write("%s\n" % writer.as_string())
| bsd-3-clause | -4,530,084,608,231,821,300 | 8,129,730,028,436,960,000 | 47.582734 | 107 | 0.570339 | false |
4shadoww/hakkuframework | core/lib/dns/rdtypes/ANY/SSHFP.py | 8 | 2829 | # Copyright (C) 2005-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import struct
import binascii
import dns.rdata
import dns.rdatatype
class SSHFP(dns.rdata.Rdata):
"""SSHFP record
@ivar algorithm: the algorithm
@type algorithm: int
@ivar fp_type: the digest type
@type fp_type: int
@ivar fingerprint: the fingerprint
@type fingerprint: string
@see: draft-ietf-secsh-dns-05.txt"""
__slots__ = ['algorithm', 'fp_type', 'fingerprint']
def __init__(self, rdclass, rdtype, algorithm, fp_type,
fingerprint):
super(SSHFP, self).__init__(rdclass, rdtype)
self.algorithm = algorithm
self.fp_type = fp_type
self.fingerprint = fingerprint
def to_text(self, origin=None, relativize=True, **kw):
return '%d %d %s' % (self.algorithm,
self.fp_type,
dns.rdata._hexify(self.fingerprint,
chunksize=128))
@classmethod
def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
algorithm = tok.get_uint8()
fp_type = tok.get_uint8()
chunks = []
while 1:
t = tok.get().unescape()
if t.is_eol_or_eof():
break
if not t.is_identifier():
raise dns.exception.SyntaxError
chunks.append(t.value.encode())
fingerprint = b''.join(chunks)
fingerprint = binascii.unhexlify(fingerprint)
return cls(rdclass, rdtype, algorithm, fp_type, fingerprint)
def to_wire(self, file, compress=None, origin=None):
header = struct.pack("!BB", self.algorithm, self.fp_type)
file.write(header)
file.write(self.fingerprint)
@classmethod
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
header = struct.unpack("!BB", wire[current: current + 2])
current += 2
rdlen -= 2
fingerprint = wire[current: current + rdlen].unwrap()
return cls(rdclass, rdtype, header[0], header[1], fingerprint)
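# A minimal usage sketch (not part of the original module): round-trip an
# SSHFP record through its presentation format. This assumes the bundled
# dnspython exposes the usual dns.rdata.from_text() helper and uses a made-up
# 20-byte (SHA-1 sized) fingerprint purely for illustration.
if __name__ == '__main__':
    import dns.rdataclass
    _rd = dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.SSHFP,
                              '2 1 0123456789abcdef0123456789abcdef01234567')
    # Expected to print: 2 1 0123456789abcdef0123456789abcdef01234567
    print(_rd.to_text())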
| mit | -3,453,848,641,144,263,700 | -7,185,504,113,859,237,000 | 35.269231 | 75 | 0.63556 | false |
kived/py-cnotify | test/variable.py | 4 | 15715 | # -*- coding: utf-8 -*-
#--------------------------------------------------------------------#
# This file is part of Py-notify. #
# #
# Copyright (C) 2007, 2008 Paul Pogonyshev. #
# #
# This library is free software; you can redistribute it and/or #
# modify it under the terms of the GNU Lesser General Public License #
# as published by the Free Software Foundation; either version 2.1 #
# of the License, or (at your option) any later version. #
# #
# This library is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU #
# Lesser General Public License for more details. #
# #
# You should have received a copy of the GNU Lesser General Public #
# License along with this library; if not, write to the Free #
# Software Foundation, Inc., 51 Franklin Street, Fifth Floor, #
# Boston, MA 02110-1301 USA #
#--------------------------------------------------------------------#
if __name__ == '__main__':
import os
import sys
sys.path.insert (0, os.path.join (sys.path[0], os.pardir))
import math
import unittest
from notify.variable import AbstractVariable, AbstractValueTrackingVariable, Variable, \
WatcherVariable
from notify.utils import StringType
from test.__common import NotifyTestCase, NotifyTestObject
class BaseVariableTestCase (NotifyTestCase):
def test_mutable (self):
mutable_variable = Variable ()
self.assert_(mutable_variable.mutable)
def test_predicate_1 (self):
variable = Variable (0)
is_single_digit = variable.predicate (lambda value: 0 <= value < 10)
self.assert_(is_single_digit)
self.assert_(not is_single_digit.mutable)
variable.value = -5
self.assert_(not is_single_digit)
variable.value = 9
self.assert_(is_single_digit)
variable.value = 100
self.assert_(not is_single_digit)
def test_predicate_2 (self):
test = NotifyTestObject ()
variable = Variable (0)
variable.predicate (lambda value: 0 <= value < 10).store (test.simple_handler)
variable.value = 5
variable.value = 15
variable.value = -1
variable.value = 9
variable.value = 3
test.assert_results (True, False, True)
def test_is_true (self):
variable = Variable (0)
is_true = variable.is_true ()
self.assert_(not is_true)
variable.value = 'string'
self.assert_(is_true)
variable.value = []
self.assert_(not is_true)
variable.value = None
self.assert_(not is_true)
variable.value = 25
self.assert_(is_true)
def test_transformation_1 (self):
variable = Variable (0)
floor = variable.transform (math.floor)
self.assertEqual (floor.value, 0)
self.assert_(not floor.mutable)
variable.value = 10.5
self.assertEqual (floor.value, 10)
variable.value = 15
self.assertEqual (floor.value, 15)
def test_transformation_2 (self):
test = NotifyTestObject ()
variable = Variable (0)
variable.transform (math.floor).store (test.simple_handler)
variable.value = 5
variable.value = 5.6
variable.value = 15.7
variable.value = 16
variable.value = 16.5
variable.value = 16.2
test.assert_results (0, 5, 15, 16)
def test_is_allowed_value (self):
class PositiveVariable (Variable):
def is_allowed_value (self, value):
return isinstance (value, int) and value > 0
variable = PositiveVariable (6)
# Must not raise.
variable.value = 9
variable.value = 999
# Must raise.
self.assertRaises (ValueError, lambda: variable.set (0))
self.assertRaises (ValueError, lambda: variable.set (-5))
self.assertRaises (ValueError, lambda: variable.set (2.2))
self.assertRaises (ValueError, lambda: variable.set ([]))
class WatcherVariableTestCase (NotifyTestCase):
def test_watcher_variable_1 (self):
test = NotifyTestObject ()
watcher = WatcherVariable ()
watcher.store (test.simple_handler)
variable = Variable ('abc')
watcher.watch (variable)
self.assert_(watcher.watched_variable is variable)
variable.value = 60
test.assert_results (None, 'abc', 60)
def test_watcher_variable_2 (self):
test = NotifyTestObject ()
variable1 = Variable ([])
variable2 = Variable ('string')
variable3 = Variable ('string')
watcher = WatcherVariable (variable1)
watcher.store (test.simple_handler)
watcher.watch (variable2)
watcher.watch (variable3)
watcher.watch (None)
self.assert_(watcher.watched_variable is None)
# Later two watch() calls must not change watcher's value.
test.assert_results ([], 'string', None)
def test_watcher_variable_error_1 (self):
self.assertRaises (TypeError, lambda: WatcherVariable (25))
def test_watcher_variable_error_2 (self):
watcher = WatcherVariable ()
self.assertRaises (TypeError, lambda: watcher.watch (25))
def test_watcher_variable_error_3 (self):
variable = Variable ()
watcher = WatcherVariable (variable)
self.assertRaises (ValueError, lambda: watcher.watch (watcher))
self.assert_ (watcher.watched_variable is variable)
class VariableDerivationTestCase (NotifyTestCase):
def test_derivation_1 (self):
IntVariable = Variable.derive_type ('IntVariable', allowed_value_types = int)
# Since None is not an allowed value, there must be no default constructor.
self.assertRaises (TypeError, lambda: IntVariable ())
count = IntVariable (10)
self.assertEqual (count.value, 10)
self.assertEqual (count.mutable, True)
count.value = 30
self.assertEqual (count.value, 30)
self.assertRaises (ValueError, lambda: count.set ('invalid'))
def test_derivation_2 (self):
EnumVariable = Variable.derive_type ('EnumVariable',
allowed_values = (None, 'a', 'b', 'c'))
variable = EnumVariable ()
self.assertEqual (variable.value, None)
self.assertEqual (variable.mutable, True)
variable.value = 'b'
self.assertEqual (variable.value, 'b')
self.assertRaises (ValueError, lambda: variable.set (15))
self.assertRaises (ValueError, lambda: variable.set ('d'))
def test_derivation_3 (self):
AbstractIntVariable = AbstractValueTrackingVariable.derive_type (
'AbstractIntVariable', allowed_value_types = int)
self.assertEqual (AbstractIntVariable (-5).mutable, False)
def test_derivation_4 (self):
NumericVariable = Variable.derive_type ('NumericVariable',
allowed_value_types = (int, float, complex))
self.assertRaises (TypeError, lambda: NumericVariable ())
variable = NumericVariable (0)
variable.value = 15
self.assertEqual (variable.value, 15)
variable.value = -2.5
self.assertEqual (variable.value, -2.5)
variable.value = 1j
self.assertEqual (variable.value, 1j)
self.assertRaises (ValueError, lambda: variable.set ('string'))
self.assertRaises (ValueError, lambda: variable.set ([]))
def test_derivation_5 (self):
IntVariable = Variable.derive_type ('IntVariable',
allowed_value_types = int, default_value = 10)
variable = IntVariable ()
self.assertEqual (variable.value, 10)
variable = IntVariable (30)
self.assertEqual (variable.value, 30)
self.assertRaises (ValueError, lambda: variable.set ('string'))
def test_derivation_6 (self):
StringVariable = Variable.derive_type ('StringVariable',
allowed_value_types = StringType,
setter = lambda variable, value: None)
variable = StringVariable ('')
self.assertRaises (ValueError, lambda: variable.set (None))
def test_derivation_7 (self):
DerivedVariable = \
AbstractValueTrackingVariable.derive_type ('DerivedVariable',
setter = lambda variable, value: None)
variable = DerivedVariable ()
self.assert_(variable.value is None)
variable.set (100)
self.assert_(variable.value == 100)
variable.value = 'abc'
self.assert_(variable.value == 'abc')
def test_derivation_8 (self):
test = NotifyTestObject ()
DerivedVariable = \
AbstractValueTrackingVariable.derive_type ('DerivedVariable',
getter = lambda variable: None,
setter = (lambda variable, value:
test.simple_handler (value)))
variable = DerivedVariable ()
variable.set (100)
variable.value = 'abc'
# The default value is retrieved with the getter function, so the setter must not
# be called during variable creation.
test.assert_results (100, 'abc')
def test_derivation_9 (self):
test = NotifyTestObject ()
DerivedVariable = \
AbstractValueTrackingVariable.derive_type ('DerivedVariable',
setter = (lambda variable, value:
test.simple_handler (value)))
variable = DerivedVariable ()
variable.set (100)
variable.value = 'abc'
# There is no getter at all, so setter must be called during variable creation.
test.assert_results (None, 100, 'abc')
def test_derivation_10 (self):
def set_value (list, value):
list[0] = value
DerivedVariable = AbstractVariable.derive_type ('DerivedVariable',
object = '__list', property = 'list',
getter = lambda list: list[0],
setter = set_value)
a = DerivedVariable ([123])
self.assertEqual (a.value, 123)
a.value = 'foo'
self.assertEqual (a.value, 'foo')
self.assertEqual (a.list, ['foo'])
def test_derivation_11 (self):
# Test that derivation with keyword slot or property raises.
self.assertRaises (ValueError, lambda: AbstractVariable.derive_type ('DerivedVariable',
object = 'or'))
self.assertRaises (ValueError, lambda: AbstractVariable.derive_type ('DerivedVariable',
object = '__class',
property = 'class'))
# Test against a real bug present up to 0.1.12.
def test_derivation_12 (self):
DerivedVariable = AbstractValueTrackingVariable.derive_type ('DerivedVariable',
object = '__list',
property = 'list')
variable = DerivedVariable ([1, 2, 3], 200)
self.assertEqual (variable.list, [1, 2, 3])
self.assertEqual (variable.value, 200)
def test_object_derivation_1 (self):
class MainObject (object):
def __init__(self, x):
self.__x = x
def get_x (self):
return self.__x
XVariable = AbstractValueTrackingVariable.derive_type ('XVariable', object = 'main',
getter = MainObject.get_x)
main = MainObject (100)
variable = XVariable (main)
self.assert_(variable.main is main)
self.assert_(variable.value is main.get_x ())
main.x = 200
self.assert_(variable.value is main.get_x ())
def test_object_derivation_2 (self):
class MainObject (object):
def __init__(self, x):
self.__x = x
self.__x_variable = XVariable (self)
def get_x (self):
return self.__x
def _set_x (self, x):
self.__x = x
x = property (lambda self: self.__x_variable)
XVariable = AbstractValueTrackingVariable.derive_type ('XVariable',
object = '__main',
property = 'main',
getter = MainObject.get_x,
setter = MainObject._set_x)
main = MainObject (100)
self.assert_(main.x.main is main)
self.assert_(main.x.value is main.get_x ())
main.x.value = 200
self.assert_(main.x.value is main.get_x ())
def set_main_x ():
main.x = None
self.assertRaises (AttributeError, set_main_x)
def test_derivation_slots (self):
DerivedVariable = AbstractVariable.derive_type ('DerivedVariable')
self.assertRaises (AttributeError, self.non_existing_attribute_setter (DerivedVariable ()))
DerivedVariable = AbstractValueTrackingVariable.derive_type ('DerivedVariable')
self.assertRaises (AttributeError, self.non_existing_attribute_setter (DerivedVariable ()))
DerivedVariable = Variable.derive_type ('DerivedVariable')
self.assertRaises (AttributeError, self.non_existing_attribute_setter (DerivedVariable ()))
def test_multiple_derivation (self):
# Derive two types and make sure they don't spoil each other's is_allowed_value()
# method.
IntVariable = Variable.derive_type ('IntVariable', allowed_value_types = int)
StrVariable = Variable.derive_type ('StrVariable', allowed_value_types = str)
integer = IntVariable (10)
string = StrVariable ('test')
integer.value = 20
self.assertEqual (integer.value, 20)
string.value = 'string'
self.assertEqual (string.value, 'string')
self.assertRaises (ValueError, lambda: integer.set ('foo'))
self.assertRaises (ValueError, lambda: string .set (-1000))
self.assertRaises (ValueError, lambda: integer.set (''))
self.assertRaises (ValueError, lambda: string .set (0))
if __name__ == '__main__':
unittest.main ()
# Local variables:
# mode: python
# python-indent: 4
# indent-tabs-mode: nil
# fill-column: 90
# End:
| lgpl-2.1 | -5,558,171,791,553,774,000 | -1,986,900,854,514,239,200 | 31.739583 | 99 | 0.547693 | false |
samuknet/servo | tests/wpt/web-platform-tests/tools/py/testing/code/test_excinfo.py | 160 | 30688 | # -*- coding: utf-8 -*-
import py
from py._code.code import FormattedExcinfo, ReprExceptionInfo
queue = py.builtin._tryimport('queue', 'Queue')
failsonjython = py.test.mark.xfail("sys.platform.startswith('java')")
from test_source import astonly
try:
import importlib
except ImportError:
invalidate_import_caches = None
else:
invalidate_import_caches = getattr(importlib, "invalidate_caches", None)
import pytest
pytest_version_info = tuple(map(int, pytest.__version__.split(".")[:3]))
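# TWMock below stands in for py.io.TerminalWriter in these tests: it records
# every line()/sep() call in .lines so assertions can inspect the rendered
# traceback output without needing a real terminal.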
class TWMock:
def __init__(self):
self.lines = []
def sep(self, sep, line=None):
self.lines.append((sep, line))
def line(self, line, **kw):
self.lines.append(line)
def markup(self, text, **kw):
return text
fullwidth = 80
def test_excinfo_simple():
try:
raise ValueError
except ValueError:
info = py.code.ExceptionInfo()
assert info.type == ValueError
def test_excinfo_getstatement():
def g():
raise ValueError
def f():
g()
try:
f()
except ValueError:
excinfo = py.code.ExceptionInfo()
linenumbers = [py.code.getrawcode(f).co_firstlineno-1+3,
py.code.getrawcode(f).co_firstlineno-1+1,
py.code.getrawcode(g).co_firstlineno-1+1,]
l = list(excinfo.traceback)
foundlinenumbers = [x.lineno for x in l]
assert foundlinenumbers == linenumbers
#for x in info:
# print "%s:%d %s" %(x.path.relto(root), x.lineno, x.statement)
#xxx
# testchain for getentries test below
def f():
#
raise ValueError
#
def g():
#
__tracebackhide__ = True
f()
#
def h():
#
g()
#
class TestTraceback_f_g_h:
def setup_method(self, method):
try:
h()
except ValueError:
self.excinfo = py.code.ExceptionInfo()
def test_traceback_entries(self):
tb = self.excinfo.traceback
entries = list(tb)
assert len(tb) == 4 # maybe fragile test
assert len(entries) == 4 # maybe fragile test
names = ['f', 'g', 'h']
for entry in entries:
try:
names.remove(entry.frame.code.name)
except ValueError:
pass
assert not names
def test_traceback_entry_getsource(self):
tb = self.excinfo.traceback
s = str(tb[-1].getsource() )
assert s.startswith("def f():")
assert s.endswith("raise ValueError")
@astonly
@failsonjython
def test_traceback_entry_getsource_in_construct(self):
source = py.code.Source("""\
def xyz():
try:
raise ValueError
except somenoname:
pass
xyz()
""")
try:
exec (source.compile())
except NameError:
tb = py.code.ExceptionInfo().traceback
print (tb[-1].getsource())
s = str(tb[-1].getsource())
assert s.startswith("def xyz():\n try:")
assert s.strip().endswith("except somenoname:")
def test_traceback_cut(self):
co = py.code.Code(f)
path, firstlineno = co.path, co.firstlineno
traceback = self.excinfo.traceback
newtraceback = traceback.cut(path=path, firstlineno=firstlineno)
assert len(newtraceback) == 1
newtraceback = traceback.cut(path=path, lineno=firstlineno+2)
assert len(newtraceback) == 1
def test_traceback_cut_excludepath(self, testdir):
p = testdir.makepyfile("def f(): raise ValueError")
excinfo = py.test.raises(ValueError, "p.pyimport().f()")
basedir = py.path.local(py.test.__file__).dirpath()
newtraceback = excinfo.traceback.cut(excludepath=basedir)
for x in newtraceback:
if hasattr(x, 'path'):
assert not py.path.local(x.path).relto(basedir)
assert newtraceback[-1].frame.code.path == p
def test_traceback_filter(self):
traceback = self.excinfo.traceback
ntraceback = traceback.filter()
assert len(ntraceback) == len(traceback) - 1
def test_traceback_recursion_index(self):
def f(n):
if n < 10:
n += 1
f(n)
excinfo = py.test.raises(RuntimeError, f, 8)
traceback = excinfo.traceback
recindex = traceback.recursionindex()
assert recindex == 3
def test_traceback_only_specific_recursion_errors(self, monkeypatch):
def f(n):
if n == 0:
raise RuntimeError("hello")
f(n-1)
excinfo = pytest.raises(RuntimeError, f, 100)
monkeypatch.delattr(excinfo.traceback.__class__, "recursionindex")
repr = excinfo.getrepr()
assert "RuntimeError: hello" in str(repr.reprcrash)
def test_traceback_no_recursion_index(self):
def do_stuff():
raise RuntimeError
def reraise_me():
import sys
exc, val, tb = sys.exc_info()
py.builtin._reraise(exc, val, tb)
def f(n):
try:
do_stuff()
except:
reraise_me()
excinfo = py.test.raises(RuntimeError, f, 8)
traceback = excinfo.traceback
recindex = traceback.recursionindex()
assert recindex is None
def test_traceback_messy_recursion(self):
#XXX: simplified locally testable version
decorator = py.test.importorskip('decorator').decorator
def log(f, *k, **kw):
print('%s %s' % (k, kw))
f(*k, **kw)
log = decorator(log)
def fail():
raise ValueError('')
fail = log(log(fail))
excinfo = py.test.raises(ValueError, fail)
assert excinfo.traceback.recursionindex() is None
def test_traceback_getcrashentry(self):
def i():
__tracebackhide__ = True
raise ValueError
def h():
i()
def g():
__tracebackhide__ = True
h()
def f():
g()
excinfo = py.test.raises(ValueError, f)
tb = excinfo.traceback
entry = tb.getcrashentry()
co = py.code.Code(h)
assert entry.frame.code.path == co.path
assert entry.lineno == co.firstlineno + 1
assert entry.frame.code.name == 'h'
def test_traceback_getcrashentry_empty(self):
def g():
__tracebackhide__ = True
raise ValueError
def f():
__tracebackhide__ = True
g()
excinfo = py.test.raises(ValueError, f)
tb = excinfo.traceback
entry = tb.getcrashentry()
co = py.code.Code(g)
assert entry.frame.code.path == co.path
assert entry.lineno == co.firstlineno + 2
assert entry.frame.code.name == 'g'
def hello(x):
x + 5
def test_tbentry_reinterpret():
try:
hello("hello")
except TypeError:
excinfo = py.code.ExceptionInfo()
tbentry = excinfo.traceback[-1]
msg = tbentry.reinterpret()
assert msg.startswith("TypeError: ('hello' + 5)")
def test_excinfo_exconly():
excinfo = py.test.raises(ValueError, h)
assert excinfo.exconly().startswith('ValueError')
excinfo = py.test.raises(ValueError,
"raise ValueError('hello\\nworld')")
msg = excinfo.exconly(tryshort=True)
assert msg.startswith('ValueError')
assert msg.endswith("world")
def test_excinfo_repr():
excinfo = py.test.raises(ValueError, h)
s = repr(excinfo)
assert s == "<ExceptionInfo ValueError tblen=4>"
def test_excinfo_str():
excinfo = py.test.raises(ValueError, h)
s = str(excinfo)
assert s.startswith(__file__[:-9]) # pyc file and $py.class
assert s.endswith("ValueError")
assert len(s.split(":")) >= 3 # on windows it's 4
def test_excinfo_errisinstance():
excinfo = py.test.raises(ValueError, h)
assert excinfo.errisinstance(ValueError)
def test_excinfo_no_sourcecode():
try:
exec ("raise ValueError()")
except ValueError:
excinfo = py.code.ExceptionInfo()
s = str(excinfo.traceback[-1])
if py.std.sys.version_info < (2,5):
assert s == " File '<string>':1 in ?\n ???\n"
else:
assert s == " File '<string>':1 in <module>\n ???\n"
def test_excinfo_no_python_sourcecode(tmpdir):
#XXX: simplified locally testable version
tmpdir.join('test.txt').write("{{ h()}}:")
jinja2 = py.test.importorskip('jinja2')
loader = jinja2.FileSystemLoader(str(tmpdir))
env = jinja2.Environment(loader=loader)
template = env.get_template('test.txt')
excinfo = py.test.raises(ValueError,
template.render, h=h)
for item in excinfo.traceback:
print(item) #XXX: for some reason jinja.Template.render is printed in full
        item.source # shouldn't fail
if item.path.basename == 'test.txt':
assert str(item.source) == '{{ h()}}:'
def test_entrysource_Queue_example():
try:
queue.Queue().get(timeout=0.001)
except queue.Empty:
excinfo = py.code.ExceptionInfo()
entry = excinfo.traceback[-1]
source = entry.getsource()
assert source is not None
s = str(source).strip()
assert s.startswith("def get")
def test_codepath_Queue_example():
try:
queue.Queue().get(timeout=0.001)
except queue.Empty:
excinfo = py.code.ExceptionInfo()
entry = excinfo.traceback[-1]
path = entry.path
assert isinstance(path, py.path.local)
assert path.basename.lower() == "queue.py"
assert path.check()
class TestFormattedExcinfo:
def pytest_funcarg__importasmod(self, request):
def importasmod(source):
source = py.code.Source(source)
tmpdir = request.getfuncargvalue("tmpdir")
modpath = tmpdir.join("mod.py")
tmpdir.ensure("__init__.py")
modpath.write(source)
if invalidate_import_caches is not None:
invalidate_import_caches()
return modpath.pyimport()
return importasmod
def excinfo_from_exec(self, source):
source = py.code.Source(source).strip()
try:
exec (source.compile())
except KeyboardInterrupt:
raise
except:
return py.code.ExceptionInfo()
assert 0, "did not raise"
def test_repr_source(self):
pr = FormattedExcinfo()
source = py.code.Source("""
def f(x):
pass
""").strip()
pr.flow_marker = "|"
lines = pr.get_source(source, 0)
assert len(lines) == 2
assert lines[0] == "| def f(x):"
assert lines[1] == " pass"
def test_repr_source_excinfo(self):
""" check if indentation is right """
pr = FormattedExcinfo()
excinfo = self.excinfo_from_exec("""
def f():
assert 0
f()
""")
pr = FormattedExcinfo()
source = pr._getentrysource(excinfo.traceback[-1])
lines = pr.get_source(source, 1, excinfo)
assert lines == [
' def f():',
'> assert 0',
'E assert 0'
]
def test_repr_source_not_existing(self):
pr = FormattedExcinfo()
co = compile("raise ValueError()", "", "exec")
try:
exec (co)
except ValueError:
excinfo = py.code.ExceptionInfo()
repr = pr.repr_excinfo(excinfo)
assert repr.reprtraceback.reprentries[1].lines[0] == "> ???"
def test_repr_many_line_source_not_existing(self):
pr = FormattedExcinfo()
co = compile("""
a = 1
raise ValueError()
""", "", "exec")
try:
exec (co)
except ValueError:
excinfo = py.code.ExceptionInfo()
repr = pr.repr_excinfo(excinfo)
assert repr.reprtraceback.reprentries[1].lines[0] == "> ???"
def test_repr_source_failing_fullsource(self):
pr = FormattedExcinfo()
class FakeCode(object):
class raw:
co_filename = '?'
path = '?'
firstlineno = 5
def fullsource(self):
return None
fullsource = property(fullsource)
class FakeFrame(object):
code = FakeCode()
f_locals = {}
f_globals = {}
class FakeTracebackEntry(py.code.Traceback.Entry):
def __init__(self, tb):
self.lineno = 5+3
@property
def frame(self):
return FakeFrame()
class Traceback(py.code.Traceback):
Entry = FakeTracebackEntry
class FakeExcinfo(py.code.ExceptionInfo):
typename = "Foo"
def __init__(self):
pass
def exconly(self, tryshort):
return "EXC"
def errisinstance(self, cls):
return False
excinfo = FakeExcinfo()
class FakeRawTB(object):
tb_next = None
tb = FakeRawTB()
excinfo.traceback = Traceback(tb)
fail = IOError()
repr = pr.repr_excinfo(excinfo)
assert repr.reprtraceback.reprentries[0].lines[0] == "> ???"
fail = py.error.ENOENT
repr = pr.repr_excinfo(excinfo)
assert repr.reprtraceback.reprentries[0].lines[0] == "> ???"
def test_repr_local(self):
p = FormattedExcinfo(showlocals=True)
loc = {'y': 5, 'z': 7, 'x': 3, '@x': 2, '__builtins__': {}}
reprlocals = p.repr_locals(loc)
assert reprlocals.lines
assert reprlocals.lines[0] == '__builtins__ = <builtins>'
assert reprlocals.lines[1] == 'x = 3'
assert reprlocals.lines[2] == 'y = 5'
assert reprlocals.lines[3] == 'z = 7'
def test_repr_tracebackentry_lines(self, importasmod):
mod = importasmod("""
def func1():
raise ValueError("hello\\nworld")
""")
excinfo = py.test.raises(ValueError, mod.func1)
excinfo.traceback = excinfo.traceback.filter()
p = FormattedExcinfo()
reprtb = p.repr_traceback_entry(excinfo.traceback[-1])
# test as intermittent entry
lines = reprtb.lines
assert lines[0] == ' def func1():'
assert lines[1] == '> raise ValueError("hello\\nworld")'
# test as last entry
p = FormattedExcinfo(showlocals=True)
repr_entry = p.repr_traceback_entry(excinfo.traceback[-1], excinfo)
lines = repr_entry.lines
assert lines[0] == ' def func1():'
assert lines[1] == '> raise ValueError("hello\\nworld")'
assert lines[2] == 'E ValueError: hello'
assert lines[3] == 'E world'
assert not lines[4:]
loc = repr_entry.reprlocals is not None
loc = repr_entry.reprfileloc
assert loc.path == mod.__file__
assert loc.lineno == 3
#assert loc.message == "ValueError: hello"
    def test_repr_tracebackentry_lines2(self, importasmod):
mod = importasmod("""
def func1(m, x, y, z):
raise ValueError("hello\\nworld")
""")
excinfo = py.test.raises(ValueError, mod.func1, "m"*90, 5, 13, "z"*120)
excinfo.traceback = excinfo.traceback.filter()
entry = excinfo.traceback[-1]
p = FormattedExcinfo(funcargs=True)
reprfuncargs = p.repr_args(entry)
assert reprfuncargs.args[0] == ('m', repr("m"*90))
assert reprfuncargs.args[1] == ('x', '5')
assert reprfuncargs.args[2] == ('y', '13')
assert reprfuncargs.args[3] == ('z', repr("z" * 120))
p = FormattedExcinfo(funcargs=True)
repr_entry = p.repr_traceback_entry(entry)
assert repr_entry.reprfuncargs.args == reprfuncargs.args
tw = TWMock()
repr_entry.toterminal(tw)
assert tw.lines[0] == "m = " + repr('m' * 90)
assert tw.lines[1] == "x = 5, y = 13"
assert tw.lines[2] == "z = " + repr('z' * 120)
def test_repr_tracebackentry_lines_var_kw_args(self, importasmod):
mod = importasmod("""
def func1(x, *y, **z):
raise ValueError("hello\\nworld")
""")
excinfo = py.test.raises(ValueError, mod.func1, 'a', 'b', c='d')
excinfo.traceback = excinfo.traceback.filter()
entry = excinfo.traceback[-1]
p = FormattedExcinfo(funcargs=True)
reprfuncargs = p.repr_args(entry)
assert reprfuncargs.args[0] == ('x', repr('a'))
assert reprfuncargs.args[1] == ('y', repr(('b',)))
assert reprfuncargs.args[2] == ('z', repr({'c': 'd'}))
p = FormattedExcinfo(funcargs=True)
repr_entry = p.repr_traceback_entry(entry)
assert repr_entry.reprfuncargs.args == reprfuncargs.args
tw = TWMock()
repr_entry.toterminal(tw)
assert tw.lines[0] == "x = 'a', y = ('b',), z = {'c': 'd'}"
def test_repr_tracebackentry_short(self, importasmod):
mod = importasmod("""
def func1():
raise ValueError("hello")
def entry():
func1()
""")
excinfo = py.test.raises(ValueError, mod.entry)
p = FormattedExcinfo(style="short")
reprtb = p.repr_traceback_entry(excinfo.traceback[-2])
lines = reprtb.lines
basename = py.path.local(mod.__file__).basename
assert lines[0] == ' func1()'
assert basename in str(reprtb.reprfileloc.path)
assert reprtb.reprfileloc.lineno == 5
# test last entry
p = FormattedExcinfo(style="short")
reprtb = p.repr_traceback_entry(excinfo.traceback[-1], excinfo)
lines = reprtb.lines
assert lines[0] == ' raise ValueError("hello")'
assert lines[1] == 'E ValueError: hello'
assert basename in str(reprtb.reprfileloc.path)
assert reprtb.reprfileloc.lineno == 3
def test_repr_tracebackentry_no(self, importasmod):
mod = importasmod("""
def func1():
raise ValueError("hello")
def entry():
func1()
""")
excinfo = py.test.raises(ValueError, mod.entry)
p = FormattedExcinfo(style="no")
p.repr_traceback_entry(excinfo.traceback[-2])
p = FormattedExcinfo(style="no")
reprentry = p.repr_traceback_entry(excinfo.traceback[-1], excinfo)
lines = reprentry.lines
assert lines[0] == 'E ValueError: hello'
assert not lines[1:]
def test_repr_traceback_tbfilter(self, importasmod):
mod = importasmod("""
def f(x):
raise ValueError(x)
def entry():
f(0)
""")
excinfo = py.test.raises(ValueError, mod.entry)
p = FormattedExcinfo(tbfilter=True)
reprtb = p.repr_traceback(excinfo)
assert len(reprtb.reprentries) == 2
p = FormattedExcinfo(tbfilter=False)
reprtb = p.repr_traceback(excinfo)
assert len(reprtb.reprentries) == 3
def test_traceback_short_no_source(self, importasmod, monkeypatch):
mod = importasmod("""
def func1():
raise ValueError("hello")
def entry():
func1()
""")
excinfo = py.test.raises(ValueError, mod.entry)
from py._code.code import Code
monkeypatch.setattr(Code, 'path', 'bogus')
excinfo.traceback[0].frame.code.path = "bogus"
p = FormattedExcinfo(style="short")
reprtb = p.repr_traceback_entry(excinfo.traceback[-2])
lines = reprtb.lines
last_p = FormattedExcinfo(style="short")
last_reprtb = last_p.repr_traceback_entry(excinfo.traceback[-1], excinfo)
last_lines = last_reprtb.lines
monkeypatch.undo()
basename = py.path.local(mod.__file__).basename
assert lines[0] == ' func1()'
assert last_lines[0] == ' raise ValueError("hello")'
assert last_lines[1] == 'E ValueError: hello'
def test_repr_traceback_and_excinfo(self, importasmod):
mod = importasmod("""
def f(x):
raise ValueError(x)
def entry():
f(0)
""")
excinfo = py.test.raises(ValueError, mod.entry)
for style in ("long", "short"):
p = FormattedExcinfo(style=style)
reprtb = p.repr_traceback(excinfo)
assert len(reprtb.reprentries) == 2
assert reprtb.style == style
assert not reprtb.extraline
repr = p.repr_excinfo(excinfo)
assert repr.reprtraceback
assert len(repr.reprtraceback.reprentries) == len(reprtb.reprentries)
assert repr.reprcrash.path.endswith("mod.py")
assert repr.reprcrash.message == "ValueError: 0"
def test_repr_traceback_with_invalid_cwd(self, importasmod, monkeypatch):
mod = importasmod("""
def f(x):
raise ValueError(x)
def entry():
f(0)
""")
excinfo = py.test.raises(ValueError, mod.entry)
p = FormattedExcinfo()
def raiseos():
raise OSError(2)
monkeypatch.setattr(py.std.os, 'getcwd', raiseos)
assert p._makepath(__file__) == __file__
reprtb = p.repr_traceback(excinfo)
def test_repr_excinfo_addouterr(self, importasmod):
mod = importasmod("""
def entry():
raise ValueError()
""")
excinfo = py.test.raises(ValueError, mod.entry)
repr = excinfo.getrepr()
repr.addsection("title", "content")
twmock = TWMock()
repr.toterminal(twmock)
assert twmock.lines[-1] == "content"
assert twmock.lines[-2] == ("-", "title")
def test_repr_excinfo_reprcrash(self, importasmod):
mod = importasmod("""
def entry():
raise ValueError()
""")
excinfo = py.test.raises(ValueError, mod.entry)
repr = excinfo.getrepr()
assert repr.reprcrash.path.endswith("mod.py")
assert repr.reprcrash.lineno == 3
assert repr.reprcrash.message == "ValueError"
assert str(repr.reprcrash).endswith("mod.py:3: ValueError")
def test_repr_traceback_recursion(self, importasmod):
mod = importasmod("""
def rec2(x):
return rec1(x+1)
def rec1(x):
return rec2(x-1)
def entry():
rec1(42)
""")
excinfo = py.test.raises(RuntimeError, mod.entry)
for style in ("short", "long", "no"):
p = FormattedExcinfo(style="short")
reprtb = p.repr_traceback(excinfo)
assert reprtb.extraline == "!!! Recursion detected (same locals & position)"
assert str(reprtb)
def test_tb_entry_AssertionError(self, importasmod):
# probably this test is a bit redundant
# as py/magic/testing/test_assertion.py
# already tests correctness of
# assertion-reinterpretation logic
mod = importasmod("""
def somefunc():
x = 1
assert x == 2
""")
excinfo = py.test.raises(AssertionError, mod.somefunc)
p = FormattedExcinfo()
reprentry = p.repr_traceback_entry(excinfo.traceback[-1], excinfo)
lines = reprentry.lines
assert lines[-1] == "E assert 1 == 2"
def test_reprexcinfo_getrepr(self, importasmod):
mod = importasmod("""
def f(x):
raise ValueError(x)
def entry():
f(0)
""")
excinfo = py.test.raises(ValueError, mod.entry)
for style in ("short", "long", "no"):
for showlocals in (True, False):
repr = excinfo.getrepr(style=style, showlocals=showlocals)
assert isinstance(repr, ReprExceptionInfo)
assert repr.reprtraceback.style == style
def test_reprexcinfo_unicode(self):
from py._code.code import TerminalRepr
class MyRepr(TerminalRepr):
def toterminal(self, tw):
tw.line(py.builtin._totext("я", "utf-8"))
x = py.builtin._totext(MyRepr())
assert x == py.builtin._totext("я", "utf-8")
def test_toterminal_long(self, importasmod):
mod = importasmod("""
def g(x):
raise ValueError(x)
def f():
g(3)
""")
excinfo = py.test.raises(ValueError, mod.f)
excinfo.traceback = excinfo.traceback.filter()
repr = excinfo.getrepr()
tw = TWMock()
repr.toterminal(tw)
assert tw.lines[0] == ""
tw.lines.pop(0)
assert tw.lines[0] == " def f():"
assert tw.lines[1] == "> g(3)"
assert tw.lines[2] == ""
assert tw.lines[3].endswith("mod.py:5: ")
assert tw.lines[4] == ("_ ", None)
assert tw.lines[5] == ""
assert tw.lines[6] == " def g(x):"
assert tw.lines[7] == "> raise ValueError(x)"
assert tw.lines[8] == "E ValueError: 3"
assert tw.lines[9] == ""
assert tw.lines[10].endswith("mod.py:3: ValueError")
def test_toterminal_long_missing_source(self, importasmod, tmpdir):
mod = importasmod("""
def g(x):
raise ValueError(x)
def f():
g(3)
""")
excinfo = py.test.raises(ValueError, mod.f)
tmpdir.join('mod.py').remove()
excinfo.traceback = excinfo.traceback.filter()
repr = excinfo.getrepr()
tw = TWMock()
repr.toterminal(tw)
assert tw.lines[0] == ""
tw.lines.pop(0)
assert tw.lines[0] == "> ???"
assert tw.lines[1] == ""
assert tw.lines[2].endswith("mod.py:5: ")
assert tw.lines[3] == ("_ ", None)
assert tw.lines[4] == ""
assert tw.lines[5] == "> ???"
assert tw.lines[6] == "E ValueError: 3"
assert tw.lines[7] == ""
assert tw.lines[8].endswith("mod.py:3: ValueError")
def test_toterminal_long_incomplete_source(self, importasmod, tmpdir):
mod = importasmod("""
def g(x):
raise ValueError(x)
def f():
g(3)
""")
excinfo = py.test.raises(ValueError, mod.f)
tmpdir.join('mod.py').write('asdf')
excinfo.traceback = excinfo.traceback.filter()
repr = excinfo.getrepr()
tw = TWMock()
repr.toterminal(tw)
assert tw.lines[0] == ""
tw.lines.pop(0)
assert tw.lines[0] == "> ???"
assert tw.lines[1] == ""
assert tw.lines[2].endswith("mod.py:5: ")
assert tw.lines[3] == ("_ ", None)
assert tw.lines[4] == ""
assert tw.lines[5] == "> ???"
assert tw.lines[6] == "E ValueError: 3"
assert tw.lines[7] == ""
assert tw.lines[8].endswith("mod.py:3: ValueError")
def test_toterminal_long_filenames(self, importasmod):
mod = importasmod("""
def f():
raise ValueError()
""")
excinfo = py.test.raises(ValueError, mod.f)
tw = TWMock()
path = py.path.local(mod.__file__)
old = path.dirpath().chdir()
try:
repr = excinfo.getrepr(abspath=False)
repr.toterminal(tw)
line = tw.lines[-1]
x = py.path.local().bestrelpath(path)
if len(x) < len(str(path)):
assert line == "mod.py:3: ValueError"
repr = excinfo.getrepr(abspath=True)
repr.toterminal(tw)
line = tw.lines[-1]
assert line == "%s:3: ValueError" %(path,)
finally:
old.chdir()
@py.test.mark.multi(reproptions=[
{'style': style, 'showlocals': showlocals,
'funcargs': funcargs, 'tbfilter': tbfilter
} for style in ("long", "short", "no")
for showlocals in (True, False)
for tbfilter in (True, False)
for funcargs in (True, False)])
def test_format_excinfo(self, importasmod, reproptions):
mod = importasmod("""
def g(x):
raise ValueError(x)
def f():
g(3)
""")
excinfo = py.test.raises(ValueError, mod.f)
tw = py.io.TerminalWriter(stringio=True)
repr = excinfo.getrepr(**reproptions)
repr.toterminal(tw)
assert tw.stringio.getvalue()
def test_native_style(self):
excinfo = self.excinfo_from_exec("""
assert 0
""")
repr = excinfo.getrepr(style='native')
assert "assert 0" in str(repr.reprcrash)
s = str(repr)
assert s.startswith('Traceback (most recent call last):\n File')
assert s.endswith('\nAssertionError: assert 0')
assert 'exec (source.compile())' in s
# python 2.4 fails to get the source line for the assert
if py.std.sys.version_info >= (2, 5):
assert s.count('assert 0') == 2
def test_traceback_repr_style(self, importasmod):
mod = importasmod("""
def f():
g()
def g():
h()
def h():
i()
def i():
raise ValueError()
""")
excinfo = py.test.raises(ValueError, mod.f)
excinfo.traceback = excinfo.traceback.filter()
excinfo.traceback[1].set_repr_style("short")
excinfo.traceback[2].set_repr_style("short")
r = excinfo.getrepr(style="long")
tw = TWMock()
r.toterminal(tw)
for line in tw.lines: print (line)
assert tw.lines[0] == ""
assert tw.lines[1] == " def f():"
assert tw.lines[2] == "> g()"
assert tw.lines[3] == ""
assert tw.lines[4].endswith("mod.py:3: ")
assert tw.lines[5] == ("_ ", None)
assert tw.lines[6].endswith("in g")
assert tw.lines[7] == " h()"
assert tw.lines[8].endswith("in h")
assert tw.lines[9] == " i()"
assert tw.lines[10] == ("_ ", None)
assert tw.lines[11] == ""
assert tw.lines[12] == " def i():"
assert tw.lines[13] == "> raise ValueError()"
assert tw.lines[14] == "E ValueError"
assert tw.lines[15] == ""
assert tw.lines[16].endswith("mod.py:9: ValueError")
| mpl-2.0 | 583,257,117,132,659,100 | -6,636,003,886,678,324,000 | 32.757976 | 88 | 0.546308 | false |
mlperf/inference_results_v0.7 | closed/Atos/code/dlrm/tensorrt/scripts/data_loader_terabyte.py | 18 | 12309 | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import numpy as np
from torch.utils.data import Dataset
import torch
import time
import math
from tqdm import tqdm
import argparse
class DataLoader:
"""
DataLoader dedicated for the Criteo Terabyte Click Logs dataset
"""
def __init__(
self,
data_filename,
data_directory,
days,
batch_size,
max_ind_range=-1,
split="train",
drop_last_batch=False
):
self.data_filename = data_filename
self.data_directory = data_directory
self.days = days
self.batch_size = batch_size
self.max_ind_range = max_ind_range
total_file = os.path.join(
data_directory,
data_filename + "_day_count.npz"
)
with np.load(total_file) as data:
total_per_file = data["total_per_file"][np.array(days)]
self.length = sum(total_per_file)
if split == "test" or split == "val":
self.length = int(np.ceil(self.length / 2.))
self.split = split
self.drop_last_batch = drop_last_batch
def __iter__(self):
return iter(
_batch_generator(
self.data_filename, self.data_directory, self.days,
self.batch_size, self.split, self.drop_last_batch, self.max_ind_range
)
)
def __len__(self):
if self.drop_last_batch:
return self.length // self.batch_size
else:
return math.ceil(self.length / self.batch_size)
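# A minimal usage sketch (hypothetical paths): assumes the preprocessed
# "day_<N>_reordered.npz" files and the "day_day_count.npz" totals file
# produced by the DLRM preprocessing scripts live under data_directory.
#
#   loader = DataLoader(data_filename="day", data_directory="/input",
#                       days=list(range(23)), batch_size=2048, split="train")
#   for x_int, lS_o, x_cat, y in loader:
#       ...  # feed one mini-batch to the model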
def _transform_features(
x_int_batch, x_cat_batch, y_batch, max_ind_range, flag_input_torch_tensor=False
):
if max_ind_range > 0:
x_cat_batch = x_cat_batch % max_ind_range
if flag_input_torch_tensor:
x_int_batch = torch.log(x_int_batch.clone().detach().type(torch.float) + 1)
x_cat_batch = x_cat_batch.clone().detach().type(torch.long)
y_batch = y_batch.clone().detach().type(torch.float32).view(-1, 1)
else:
x_int_batch = torch.log(torch.tensor(x_int_batch, dtype=torch.float) + 1)
x_cat_batch = torch.tensor(x_cat_batch, dtype=torch.long)
y_batch = torch.tensor(y_batch, dtype=torch.float32).view(-1, 1)
batch_size = x_cat_batch.shape[0]
feature_count = x_cat_batch.shape[1]
lS_o = torch.arange(batch_size).reshape(1, -1).repeat(feature_count, 1)
return x_int_batch, lS_o, x_cat_batch.t(), y_batch.view(-1, 1)
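# For a batch of B samples on the Criteo data (13 dense, 26 sparse features),
# the tuple returned above is shaped roughly as:
#   x_int (B, 13)  log(1 + x) transformed dense features,
#   lS_o  (26, B)  per-feature offsets 0..B-1 for the embedding-bag lookups,
#   x_cat (26, B)  sparse indices (transposed), and
#   y     (B, 1)   click labels.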
def _batch_generator(
data_filename, data_directory, days, batch_size, split, drop_last, max_ind_range
):
previous_file = None
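    # previous_file carries the tail of the previous day's arrays that was too
    # small to fill a whole batch; it is prepended to the first slice taken
    # from the next day, so batches can span day-file boundaries.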
for day in days:
filepath = os.path.join(
data_directory,
data_filename + "_{}_reordered.npz".format(day)
)
# print('Loading file: ', filepath)
with np.load(filepath) as data:
x_int = data["X_int"]
x_cat = data["X_cat"]
y = data["y"]
samples_in_file = y.shape[0]
batch_start_idx = 0
if split == "test" or split == "val":
length = int(np.ceil(samples_in_file / 2.))
if split == "test":
samples_in_file = length
elif split == "val":
batch_start_idx = samples_in_file - length
while batch_start_idx < samples_in_file - batch_size:
missing_samples = batch_size
if previous_file is not None:
missing_samples -= previous_file['y'].shape[0]
current_slice = slice(batch_start_idx, batch_start_idx + missing_samples)
x_int_batch = x_int[current_slice]
x_cat_batch = x_cat[current_slice]
y_batch = y[current_slice]
if previous_file is not None:
x_int_batch = np.concatenate(
[previous_file['x_int'], x_int_batch],
axis=0
)
x_cat_batch = np.concatenate(
[previous_file['x_cat'], x_cat_batch],
axis=0
)
y_batch = np.concatenate([previous_file['y'], y_batch], axis=0)
previous_file = None
if x_int_batch.shape[0] != batch_size:
raise ValueError('should not happen')
yield _transform_features(x_int_batch, x_cat_batch, y_batch, max_ind_range)
batch_start_idx += missing_samples
if batch_start_idx != samples_in_file:
current_slice = slice(batch_start_idx, samples_in_file)
if previous_file is not None:
previous_file = {
'x_int' : np.concatenate(
[previous_file['x_int'], x_int[current_slice]],
axis=0
),
'x_cat' : np.concatenate(
[previous_file['x_cat'], x_cat[current_slice]],
axis=0
),
'y' : np.concatenate([previous_file['y'], y[current_slice]], axis=0)
}
else:
previous_file = {
'x_int' : x_int[current_slice],
'x_cat' : x_cat[current_slice],
'y' : y[current_slice]
}
if not drop_last:
yield _transform_features(
previous_file['x_int'],
previous_file['x_cat'],
previous_file['y'],
max_ind_range
)
def _test():
generator = _batch_generator(
data_filename='day',
data_directory='/input',
days=range(23),
split="train",
batch_size=2048
)
t1 = time.time()
for x_int, lS_o, x_cat, y in generator:
t2 = time.time()
time_diff = t2 - t1
t1 = t2
print(
"time {} x_int.shape: {} lS_o.shape: {} x_cat.shape: {} y.shape: {}".format(
time_diff, x_int.shape, lS_o.shape, x_cat.shape, y.shape
)
)
class CriteoBinDataset(Dataset):
"""Binary version of criteo dataset."""
def __init__(self, data_file, counts_file,
batch_size=1, max_ind_range=-1, bytes_per_feature=4):
# dataset
self.tar_fea = 1 # single target
self.den_fea = 13 # 13 dense features
self.spa_fea = 26 # 26 sparse features
self.tad_fea = self.tar_fea + self.den_fea
self.tot_fea = self.tad_fea + self.spa_fea
self.batch_size = batch_size
self.max_ind_range = max_ind_range
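        # One "entry" is a full batch: every sample is tot_fea (= 40) int32
        # values laid out as [label, 13 dense, 26 sparse], so with
        # batch_size=2048 and 4 bytes per value an entry occupies
        # 4 * 40 * 2048 = 327,680 bytes.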
self.bytes_per_entry = (bytes_per_feature * self.tot_fea * batch_size)
self.num_entries = math.ceil(os.path.getsize(data_file) / self.bytes_per_entry)
print('data file:', data_file, 'number of batches:', self.num_entries)
self.file = open(data_file, 'rb')
with np.load(counts_file) as data:
self.counts = data["counts"]
# hardcoded for now
self.m_den = 13
def __len__(self):
return self.num_entries
def __getitem__(self, idx):
self.file.seek(idx * self.bytes_per_entry, 0)
raw_data = self.file.read(self.bytes_per_entry)
array = np.frombuffer(raw_data, dtype=np.int32)
tensor = torch.from_numpy(array).view((-1, self.tot_fea))
return _transform_features(x_int_batch=tensor[:, 1:14],
x_cat_batch=tensor[:, 14:],
y_batch=tensor[:, 0],
max_ind_range=self.max_ind_range,
flag_input_torch_tensor=True)
def numpy_to_binary(input_files, output_file_path, split='train'):
"""Convert the data to a binary format to be read with CriteoBinDataset."""
# WARNING - both categorical and numerical data must fit into int32 for
# the following code to work correctly
with open(output_file_path, 'wb') as output_file:
if split == 'train':
for input_file in input_files:
print('Processing file: ', input_file)
np_data = np.load(input_file)
np_data = np.concatenate([np_data['y'].reshape(-1, 1),
np_data['X_int'],
np_data['X_cat']], axis=1)
np_data = np_data.astype(np.int32)
output_file.write(np_data.tobytes())
else:
assert len(input_files) == 1
np_data = np.load(input_files[0])
np_data = np.concatenate([np_data['y'].reshape(-1, 1),
np_data['X_int'],
np_data['X_cat']], axis=1)
np_data = np_data.astype(np.int32)
samples_in_file = np_data.shape[0]
midpoint = int(np.ceil(samples_in_file / 2.))
if split == "test":
begin = 0
end = midpoint
elif split == "val":
begin = midpoint
end = samples_in_file
else:
raise ValueError('Unknown split value: ', split)
output_file.write(np_data[begin:end].tobytes())
def _preprocess(args):
train_files = ['{}_{}_reordered.npz'.format(args.input_data_prefix, day) for
day in range(0, 23)]
test_valid_file = args.input_data_prefix + '_23_reordered.npz'
os.makedirs(args.output_directory, exist_ok=True)
for split in ['train', 'val', 'test']:
print('Running preprocessing for split =', split)
output_file = os.path.join(args.output_directory,
'{}_data.bin'.format(split))
input_files = train_files if split == 'train' else [test_valid_file]
numpy_to_binary(input_files=input_files,
output_file_path=output_file,
split=split)
def _test_bin():
parser = argparse.ArgumentParser()
parser.add_argument('--output_directory', required=True)
parser.add_argument('--input_data_prefix', required=True)
parser.add_argument('--split', choices=['train', 'test', 'val'],
required=True)
args = parser.parse_args()
# _preprocess(args)
binary_data_file = os.path.join(args.output_directory,
'{}_data.bin'.format(args.split))
counts_file = os.path.join(args.output_directory, 'day_fea_count.npz')
dataset_binary = CriteoBinDataset(data_file=binary_data_file,
counts_file=counts_file,
batch_size=2048,)
from dlrm_data_pytorch import CriteoDataset, collate_wrapper_criteo
binary_loader = torch.utils.data.DataLoader(
dataset_binary,
batch_size=None,
shuffle=False,
num_workers=0,
collate_fn=None,
pin_memory=False,
drop_last=False,
)
original_dataset = CriteoDataset(
dataset='terabyte',
max_ind_range=10 * 1000 * 1000,
sub_sample_rate=1,
randomize=True,
split=args.split,
raw_path=args.input_data_prefix,
pro_data='dummy_string',
memory_map=True
)
original_loader = torch.utils.data.DataLoader(
original_dataset,
batch_size=2048,
shuffle=False,
num_workers=0,
collate_fn=collate_wrapper_criteo,
pin_memory=False,
drop_last=False,
)
assert len(dataset_binary) == len(original_loader)
for i, (old_batch, new_batch) in tqdm(enumerate(zip(original_loader,
binary_loader)),
total=len(dataset_binary)):
for j in range(len(new_batch)):
if not np.array_equal(old_batch[j], new_batch[j]):
raise ValueError('FAILED: Datasets not equal')
if i > len(dataset_binary):
break
print('PASSED')
if __name__ == '__main__':
_test()
    _test_bin()
| apache-2.0 | -8,151,257,624,847,348,000 | -2,691,938,308,793,963,500 | 33.002762 | 88 | 0.529288 | false |
unioslo/cerebrum | contrib/no/uio/user_per_sko.py | 1 | 20843 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2004 University of Oslo, Norway
#
# This file is part of Cerebrum.
#
# Cerebrum is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Cerebrum is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Cerebrum; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
from __future__ import unicode_literals
"""
This file is a UiO-specific extensions of Cerebrum.
It provides user/person statistics about various organizational units (OUs)
at the UiO. The script provides statistics at various granularity levels
(--level option).
--level fakultet produces statistics grouped by faculty (fakultet). A
faculty of a given OU is the first OU in the OU hierarchy that has
(institutt, avdeling) == (0, 0). For all OUs that do not have such
parents, the stats are grouped together under the same tag.
--level institutt produces statistics grouped by department (institutt). A
department of a given OU is the first OU in the OU hierarchy that has
avdeling = 0. For all OUs that do not have such parents, the stats are
grouped together under the same tag.
--level gruppe produces statistics with each OU taken as is, without any
parent lookup.
"""
import argparse
import copy
import types
import locale
from six import text_type
from Cerebrum.Utils import Factory
logger = None
def make_ou_to_stedkode_map(db):
"""
Returns a dictionary mapping ou_ids to (fak,inst,avd) triplets
(stedkoder).
"""
ou = Factory.get("OU")(db)
result = dict()
for row in ou.get_stedkoder():
result[int(row["ou_id"])] = (int(row["fakultet"]),
int(row["institutt"]),
int(row["avdeling"]))
logger.debug("%d ou -> stedkode mappings", len(result))
return result
def make_ou_to_parent_map(perspective, db):
"""
Returns a dictionary mapping ou_ids to their parent ids (or None, if no
parent exists) in a given PERSPECTIVE (FS, LT, etc.)
"""
ou = Factory.get("OU")(db)
result = dict()
for item in ou.get_structure_mappings(perspective):
if item["parent_id"] is not None:
parent_id = int(item["parent_id"])
else:
parent_id = None
result[int(item["ou_id"])] = parent_id
logger.debug("%d ou -> parent mappings", len(result))
return result
#
# sko for all OUs that we cannot classify.
__undef_ou = "andre"
def locate_ou(ou_id, ou2parent, ou2stedkode, level):
"""
Return a suitable parent of OU_ID.
LEVEL determines how far up the hierarchy we are walking.
0 means the entity itself
1 means the closest parent with avdeling part of the sko == 0
2 means the closest parent with avdeling and institutt part of
the sko == 0.
Should we reach the top of the hierarchy without finding a suitable
(parent) OU, a special value is returned. The statistics for that group
will be cumulative for _all_ OU_ID that have no suitable (parent) OU.
"""
ou_id = int(ou_id)
# If level == oneself, just return the ou_id
if level == 0:
return ou2stedkode[ou_id]
tmp = ou_id
while 1:
if tmp is None:
# We reached the top of the hierarchy without seeing anything
# suitable
logger.debug("ou_id %d has no proper parent", ou_id)
return __undef_ou
if tmp not in ou2stedkode:
logger.warn("Cannot locate sko for ou_id %s. Assuming undef", tmp)
return __undef_ou
tmp_sko = ou2stedkode[tmp]
# extract the right part of the sko
if tmp_sko[3-level:] == (0,)*level:
return tmp_sko
# ... or continue with parent
tmp = ou2parent.get(tmp)
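# Illustration: for an OU with stedkode (15, 4, 30),
#   level 0 returns (15, 4, 30) itself,
#   level 1 returns the sko of the closest ancestor of the form (xx, yy, 0),
#   level 2 returns the sko of the closest ancestor of the form (xx, 0, 0);
# if no such ancestor exists, the OU is reported under the catch-all "andre".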
def display_statistics(statistics):
"""
STATISTICS is a dictionary indexed by faculty numbers (K) and with
values (V) being dictionaries with statistics information.
    This function assumes that _all_ Vs have exactly the same set of keys.
"""
logger.debug("Statistics:")
# The keys we are interested in
keys = ('ansatt', 'student', 'a&s', 'tilknyttet', 'manuell', 'alle manuell')
    nosum = ('alle manuell',)  # one-element tuple: keys kept out of the main total
# Dictionary for totalling up numbers per affiliation
total = dict([(key, 0) for key in keys])
faculty_keys = statistics.keys()
# Order the faculty output by sko
faculty_keys.sort()
# Yes, the code is ugly, but people do not like
    # pprint.pprint(dictionary)
fak_width = 14
field_width = 10
fak_underline = u"-" * fak_width + u"+"
field_underline = u"-" * field_width + u"+"
fak_format = u"%%%ds" % fak_width
field_format = u"%%%ds" % field_width
values = (u"navn",) + tuple([x[0:field_width] for x in keys])
enc = locale.getpreferredencoding()
print (((fak_format + u"|") % u"fak") +
((field_format + u"|") * len(values)) % values).encode(enc)
print (u"%s%s" % (fak_underline, field_underline * len(values))).encode(enc)
def output_fak(faculty, value):
if isinstance(faculty, types.TupleType):
faculty_text = u"%02d%02d%02d" % faculty
else:
faculty_text = faculty
message = ((fak_format % faculty_text) +
(u"|" + field_format) % value["name"][0:field_width])
for key in keys:
message += "|" + field_format % value[key]
print message.encode(enc)
for faculty in faculty_keys:
value = statistics[faculty]
if 'cum' in value:
value['cum']['name'] = u'totalsum'
if isinstance(faculty, types.TupleType):
text = u'%02d****' % faculty[0]
else:
text = faculty + u' *'
# print (u"%s%s" % (fak_underline,
# field_underline * len(values))).encode(enc)
output_fak(text, value['cum'])
output_fak(faculty, value)
for key in keys:
total[key] += value[key]
print ("%s%s" % (fak_underline, field_underline * len(values))).encode(enc)
message = (fak_format + u"|") % u"Total" + (field_format + u"|") % u"--"
summa = 0
nosumma = 0
for key in keys:
message += (field_format + u"|") % total[key]
if key not in nosum:
summa += total[key]
else:
nosumma += total[key]
print message.encode(enc), (field_format % '{} (+{})'.format(summa, nosumma)
.encode(enc))
def purge_0rows(statistics):
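    # Drop OU rows where every counter is zero; if such a row still carries a
    # non-empty cumulative ('cum') sub-dict, re-add that as a "<fak> *" /
    # "%02d****" summary row so the faculty totals survive the purge.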
for key in statistics.keys():
val = statistics[key]
cum = val.get('cum')
empty = not any((val[k] for k in val.keys() if k not in ('cum',
'name')))
if cum and empty and any((cum[k] for k in cum.keys() if k != 'name')):
cum['name'] = u'totalsum'
if isinstance(key, types.TupleType):
name = u'%02d****' % key[0]
else:
name = u'%s *' % key
statistics[name] = cum
if empty:
del statistics[key]
return statistics
def make_empty_statistics(level, db, extra_fak_sum=False):
"""
Return an empty dictionary suitable for statistics collection.
Depending on the LEVEL, we'll have a different number of keys in
STATISTICS.
"""
fakultet, institutt, avdeling = None, None, None
if level > 0:
avdeling = 0
if level > 1:
institutt = 0
ou = Factory.get("OU")(db)
sko = ou.get_stedkoder(fakultet=fakultet, institutt=institutt,
avdeling=avdeling)
const = Factory.get("Constants")()
statistics = dict()
# "Unspecified" stats.
statistics[__undef_ou] = {"name": u"undef", 'cum': dict()}
for row in sko:
ou_sko = (int(row["fakultet"]),
int(row["institutt"]),
int(row["avdeling"]))
ou.clear()
ou.find(row["ou_id"])
acronyms = ou.search_name_with_language(
entity_id=ou.entity_id, name_variant=const.ou_name_acronym)
if acronyms:
ou_name = acronyms[0]["name"]
else:
names = ou.search_name_with_language(entity_id=ou.entity_id,
name_variant=const.ou_name)
if names:
ou_name = names[0]["name"]
else:
ou_name = u"N/A"
statistics[ou_sko] = {"name": ou_name}
if extra_fak_sum and ou_sko[1] == ou_sko[2] == 0:
statistics[ou_sko]['cum'] = dict()
for key in statistics.keys():
value = {"ansatt": 0,
"a&s": 0,
"student": 0,
"tilknyttet": 0,
"manuell": 0,
"kun manuell": 0,
"alle manuell": 0,
None: 0,
}
statistics[key].update(value)
if 'cum' in statistics[key]:
statistics[key]['cum'].update(value)
logger.debug("Generating stats for %d top-level OUs" % len(statistics))
return statistics
def make_affiliation_priorities(const):
"""
Prepares and returns a dictionary sorting affiliations/stati according
to this ruleset:
When associating an entity with a faculty during statistics collection,
we have to break ties. The ties are broken in the following fashion:
1. First we compare affiliation; they are classified in this order
ansatt, student, tilknyttet, manuell
2. If an entity has two affiliations of the same type, affiliation
status is used to break up ties in this order:
ansatt -> vitenskaplig, tekadm, bilag, permisjon
student -> aktiv, evu, alumni, perm, opptak, tilbud, soker, privatist
tilknyttet -> emeritus, ekst_forsker, ekst_stip, fagperson, bilag,
gjesteforsker, sivilarbeider, diverse
manuell -> don't care
    For the latter two, we just select one entry. It does not matter which
    one (though this might mean that statistics runs made one after the
    other can fluctuate). Blame baardj for the imprecise specification.
The dictionary uses affiliations as keys. Each value is in turn a
dictionary D, sorting that affiliation's stati. D has at least two
(key,value) pairs -- 'name' and 'value', holding that affiliation's name
and relative sort order.
"""
return {
int(const.affiliation_ansatt): {
"name": "ansatt",
"value": 0,
int(const.affiliation_status_ansatt_vit): 0,
int(const.affiliation_status_ansatt_tekadm): 1,
int(const.affiliation_status_ansatt_bil): 2,
int(const.affiliation_status_ansatt_perm): 3
},
int(const.affiliation_student): {
"name": "student",
"value": 1,
int(const.affiliation_status_student_aktiv): 0,
int(const.affiliation_status_student_evu): 1,
int(const.affiliation_status_student_alumni): 2,
int(const.affiliation_status_student_perm): 3,
int(const.affiliation_status_student_opptak): 4,
int(const.affiliation_status_student_tilbud): 5,
int(const.affiliation_status_student_soker): 6,
int(const.affiliation_status_student_privatist): 7,
},
int(const.affiliation_tilknyttet): {
"name": "tilknyttet",
"value": 2,
int(const.affiliation_tilknyttet_emeritus): 0,
int(const.affiliation_tilknyttet_ekst_forsker): 1,
int(const.affiliation_tilknyttet_ekst_stip): 2,
int(const.affiliation_tilknyttet_fagperson): 3,
int(const.affiliation_tilknyttet_bilag): 4,
int(const.affiliation_tilknyttet_gjesteforsker): 5,
int(const.affiliation_tilknyttet_sivilarbeider): 6,
int(const.affiliation_tilknyttet_diverse): 7,
},
int(const.affiliation_manuell): {
"name": "manuell",
"value": 3,
},
}
def generate_people_statistics(perspective, empty_statistics, level, db,
fak_cum=False):
"""
Collect statistics about people.
PERSPECTIVE determines how we view the OU hierarchy (FS, LT, etc)
EMPTY_STATISTICS is a dictionary with default stat values.
LEVEL designates how far up OU hierarchy we walk
The strategy is pretty straightforward:
for each person P
look at P's affiliations A
sort them according to the rules in make_affiliation_priorities
select the first affiliation FA
register P's contribution under the suitable OU derived from FA.ou_id
and affiliation derived from FA.affiliation
done
This will ensure that each person is counted only once, despite having
multiple affiliations to multiple faculties.
NB! A silly thing is that the ruleset is incomplete. Harass baardj for a
more complete specification.
"""
person = Factory.get("Person")(db)
const = Factory.get("Constants")(db)
ou2stedkode = make_ou_to_stedkode_map(db)
ou2parent = make_ou_to_parent_map(perspective, db)
statistics = copy.deepcopy(empty_statistics)
# Cache processed entities
processed = set()
# Sort order for affiliations/stati
order = make_affiliation_priorities(const)
for row in person.list_affiliations(fetchall=False):
id = int(row["person_id"])
if id in processed:
continue
else:
processed.add(id)
affiliations = person.list_affiliations(person_id=id)
# If there are no affiliations, this person contributes nothing to
# the statistics.
if not affiliations:
continue
affiliations.sort(lambda x, y:
cmp(order[x["affiliation"]],
order[y["affiliation"]])
or cmp(order.get(x["status"], 0),
order.get(y["status"], 0)))
aff = affiliations[0]
ou_result = locate_ou(aff["ou_id"], ou2parent, ou2stedkode, level)
if fak_cum:
ou_cum = locate_ou(aff["ou_id"], ou2parent, ou2stedkode, 2)
# a&s (ansatt og student) has a special rule
affs = [x["affiliation"] for x in affiliations]
if (const.affiliation_student in affs and
const.affiliation_ansatt in affs):
affiliation_name = "a&s"
else:
affiliation_name = order[aff["affiliation"]]["name"]
statistics[ou_result][affiliation_name] += 1
if fak_cum:
statistics[ou_cum]['cum'][affiliation_name] += 1
return statistics
def generate_account_statistics(perspective, empty_statistics, level, db,
extra_cum=False):
"""
Collect statistics about accounts.
for each account A
look at A's affiliations F
sort them according to the rules in make_affiliation_priorities
(and by using priority to break ties)
select the first affiliation FA
register A's contribution under a suitable OU derived from FA.ou_id and
affiliation derived from FA.affiliation
done
"""
account = Factory.get("Account")(db)
const = Factory.get("Constants")(db)
ou2stedkode = make_ou_to_stedkode_map(db)
ou2parent = make_ou_to_parent_map(perspective, db)
statistics = copy.deepcopy(empty_statistics)
# sort order for affiliations
order = make_affiliation_priorities(const)
# Keep track of accounts that had been processed
processed = set()
for row in account.list_accounts_by_type(fetchall=False):
if int(row["account_id"]) in processed:
continue
else:
processed.add(int(row["account_id"]))
affiliations = account.list_accounts_by_type(
account_id=row["account_id"],
filter_expired=True,
fetchall=True)
# Affiliations have already been ordered according to priority. Just
# pick the first one.
if not affiliations:
continue
manual_only = all((x['affiliation'] == const.affiliation_manuell
for x in affiliations))
manual = [x for x in affiliations
if x['affiliation'] == const.affiliation_manuell]
if manual and not manual_only:
for a in affiliations:
if a['affiliation'] != const.affiliation_manuell:
aff = a
break
else:
aff = affiliations[0]
ou_result = locate_ou(aff["ou_id"], ou2parent, ou2stedkode, level)
if extra_cum:
ou_cum = locate_ou(aff["ou_id"], ou2parent, ou2stedkode, 2)
affs = [x["affiliation"] for x in affiliations]
if (const.affiliation_student in affs and
const.affiliation_ansatt in affs):
affiliation_name = "a&s"
else:
affiliation_name = order[aff["affiliation"]]["name"]
try:
statistics[ou_result][affiliation_name] += 1
if extra_cum:
statistics[ou_cum]['cum'][affiliation_name] += 1
if manual_only:
statistics[ou_result]['kun manuell'] += 1
if extra_cum:
statistics[ou_cum]['cum']['kun manuell'] += 1
except:
logger.error("ou_result = %s (%s; %s);",
ou_result, ou_result in statistics,
text_type(aff.ou_id))
raise
for aff in manual:
ou_result = locate_ou(aff['ou_id'], ou2parent, ou2stedkode, level)
try:
statistics[ou_result]['alle manuell'] += 1
if extra_cum:
statistics[locate_ou(aff['ou_id'],
ou2parent,
ou2stedkode,
2)]['cum']['alle manuell'] += 1
except:
logger.error('ou_result = %s (%s; %s); (for manual)',
ou_result, ou_result in statistics,
text_type(aff.ou_id))
return statistics
def main():
global logger
logger = Factory.get_logger("cronjob")
logger.info("Statistics for OUs at UiO")
ap = argparse.ArgumentParser()
ap.add_argument('-p', '--people', action='store_true',
help='Get people statistics')
ap.add_argument('-u', '--users', action='store_true',
help='Get user statistics')
ap.add_argument('-l', '--level', action='store',
choices=('fakultet', 'institutt', 'gruppe'),
required=True,
help='The granularity of the report')
ap.add_argument('-c', '--cumulate', action='store_true',
help='Add cumulated results to faculty')
ap.add_argument('-e', '--perspective', action='store',
choices=('FS', 'SAP', 'LT'),
required=True,
help='OU perspective to use')
ap.add_argument('-k', '--keep', action='store_true',
help='Keep all zero rows')
args = ap.parse_args()
db = Factory.get("Database")()
const = Factory.get("Constants")(db)
level = {"fakultet": 2, "institutt": 1, "gruppe": 0}[args.level]
perspective = {
"FS": const.perspective_fs,
"SAP": const.perspective_sap,
"LT": const.perspective_lt
}[args.perspective]
cum = args.cumulate
if args.people:
people_result = generate_people_statistics(
perspective,
make_empty_statistics(level, db, cum), level, db, cum)
if not args.keep:
purge_0rows(people_result)
display_statistics(people_result)
if args.users:
users_result = generate_account_statistics(
perspective,
make_empty_statistics(level, db, cum), level, db, cum)
if not args.keep:
purge_0rows(users_result)
display_statistics(users_result)
if __name__ == '__main__':
main()
| gpl-2.0 | 4,487,869,850,670,945,000 | -989,838,426,135,253,000 | 33.45124 | 80 | 0.58101 | false |
packet-tracker/onos-1.2.0-custom-build | tools/test/topos/optical2.py | 19 | 2477 | #!/usr/bin/env python
''' file: custom/optical.py '''
from mininet.topo import Topo
from mininet.net import Mininet
from mininet.cli import CLI
from mininet.log import setLogLevel, info
from mininet.link import Intf, Link
from mininet.node import RemoteController
class NullIntf( Intf ):
"A dummy interface with a blank name that doesn't do any configuration"
def __init__( self, name, **params ):
self.name = ''
class NullLink( Link ):
"A dummy link that doesn't touch either interface"
def makeIntfPair( cls, intf1, intf2, *args, **kwargs ):
pass
def delete( self ):
pass
class OpticalTopo(Topo):
def addIntf( self, switch, intfName ):
"Add intf intfName to switch"
self.addLink( switch, switch, cls=NullLink,
intfName1=intfName, cls2=NullIntf )
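    # Note (illustrative): addIntf(s1, 'tap29') attaches an already-existing
    # OS interface (tap29) to switch s1; the NullLink/NullIntf pair keeps
    # Mininet from creating or configuring a veth pair for it.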
def __init__(self):
# Initialize topology
Topo.__init__(self)
# Add hosts and switches
h1 = self.addHost('h1')
h2 = self.addHost('h2')
h3 = self.addHost('h3')
h4 = self.addHost('h4')
h5 = self.addHost('h5')
h6 = self.addHost('h6')
s1 = self.addSwitch('s1', dpid="0000ffffffff0001")
s2 = self.addSwitch('s2', dpid="0000ffffffff0002")
s3 = self.addSwitch('s3', dpid="0000ffffffff0003")
s4 = self.addSwitch('s4', dpid="0000ffffffff0004")
s5 = self.addSwitch('s5', dpid="0000ffffffff0005")
s6 = self.addSwitch('s6', dpid="0000ffffffff0006")
# Add links from hosts to OVS
self.addLink(s1, h1)
self.addLink(s2, h2)
self.addLink(s3, h3)
self.addLink(s4, h4)
self.addLink(s5, h5)
self.addLink(s6, h6)
# add links from ovs to linc-oe
# sorry about the syntax :(
self.addIntf(s1,'tap29')
self.addIntf(s2,'tap30')
self.addIntf(s3,'tap31')
self.addIntf(s4,'tap32')
self.addIntf(s5,'tap33')
self.addIntf(s6,'tap34')
# if you run `sudo mn --custom custom/optical.py`, register the topo:
topos = {'optical': ( lambda: OpticalTopo() )}
def run():
c = RemoteController('c','127.0.0.1',6653)
net = Mininet( topo=OpticalTopo(),controller=None,autoSetMacs=True)
net.addController(c)
net.start()
#installStaticFlows( net )
CLI( net )
net.stop()
# if the script is run directly (sudo custom/optical.py):
if __name__ == '__main__':
setLogLevel('info')
run()
| apache-2.0 | 6,124,417,334,098,217,000 | -2,852,377,193,263,094,300 | 28.488095 | 81 | 0.604764 | false |
dav1x/ansible | lib/ansible/modules/network/dellos10/dellos10_command.py | 46 | 7522 | #!/usr/bin/python
#
# (c) 2015 Peter Sprygada, <[email protected]>
#
# Copyright (c) 2017 Dell Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: dellos10_command
version_added: "2.2"
author: "Senthil Kumar Ganesan (@skg-net)"
short_description: Run commands on remote devices running Dell OS10
description:
- Sends arbitrary commands to a Dell OS10 node and returns the results
read from the device. This module includes an
argument that will cause the module to wait for a specific condition
before returning or timing out if the condition is not met.
- This module does not support running commands in configuration mode.
Please use M(dellos10_config) to configure Dell OS10 devices.
extends_documentation_fragment: dellos10
options:
commands:
description:
- List of commands to send to the remote dellos10 device over the
configured provider. The resulting output from the command
is returned. If the I(wait_for) argument is provided, the
module is not returned until the condition is satisfied or
the number of retries has expired.
required: true
wait_for:
description:
- List of conditions to evaluate against the output of the
command. The task will wait for each condition to be true
before moving forward. If the conditional is not true
within the configured number of I(retries), the task fails.
See examples.
required: false
default: null
retries:
description:
      - Specifies the number of retries a command should be tried
before it is considered failed. The command is run on the
target device every retry and evaluated against the
I(wait_for) conditions.
required: false
default: 10
interval:
description:
- Configures the interval in seconds to wait between retries
of the command. If the command does not pass the specified
conditions, the interval indicates how long to wait before
trying the command again.
required: false
default: 1
"""
EXAMPLES = """
# Note: examples below use the following provider dict to handle
# transport and authentication to the node.
vars:
cli:
host: "{{ inventory_hostname }}"
username: admin
password: admin
transport: cli
tasks:
- name: run show version on remote devices
dellos10_command:
commands: show version
provider: "{{ cli }}"
- name: run show version and check to see if output contains OS10
dellos10_command:
commands: show version
wait_for: result[0] contains OS10
provider: "{{ cli }}"
- name: run multiple commands on remote nodes
dellos10_command:
commands:
- show version
- show interface
provider: "{{ cli }}"
- name: run multiple commands and evaluate the output
dellos10_command:
commands:
- show version
- show interface
wait_for:
- result[0] contains OS10
- result[1] contains Ethernet
provider: "{{ cli }}"
"""
RETURN = """
stdout:
description: The set of responses from the commands
returned: always apart from low level errors (such as action plugin)
type: list
sample: ['...', '...']
stdout_lines:
description: The value of stdout split into a list
returned: always apart from low level errors (such as action plugin)
type: list
sample: [['...', '...'], ['...'], ['...']]
failed_conditions:
description: The list of conditionals that have failed
returned: failed
type: list
sample: ['...', '...']
warnings:
description: The list of warnings (if any) generated by module based on arguments
returned: always
type: list
sample: ['...', '...']
"""
import time
from ansible.module_utils.dellos10 import run_commands
from ansible.module_utils.dellos10 import dellos10_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network_common import ComplexList
from ansible.module_utils.netcli import Conditional
def to_lines(stdout):
for item in stdout:
if isinstance(item, basestring):
item = str(item).split('\n')
yield item
def parse_commands(module, warnings):
command = ComplexList(dict(
command=dict(key=True),
prompt=dict(),
answer=dict()
), module)
commands = command(module.params['commands'])
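    # Illustrative result (assumed ComplexList behaviour): a bare string item
    # such as "show version" becomes
    # {'command': 'show version', 'prompt': None, 'answer': None}.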
for index, item in enumerate(commands):
if module.check_mode and not item['command'].startswith('show'):
warnings.append(
'only show commands are supported when using check mode, not '
'executing `%s`' % item['command']
)
elif item['command'].startswith('conf'):
module.fail_json(
msg='dellos10_command does not support running config mode '
'commands. Please use dellos10_config instead'
)
return commands
def main():
"""main entry point for module execution
"""
argument_spec = dict(
# { command: <str>, prompt: <str>, response: <str> }
commands=dict(type='list', required=True),
wait_for=dict(type='list', aliases=['waitfor']),
match=dict(default='all', choices=['all', 'any']),
retries=dict(default=10, type='int'),
interval=dict(default=1, type='int')
)
argument_spec.update(dellos10_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
result = {'changed': False}
warnings = list()
check_args(module, warnings)
commands = parse_commands(module, warnings)
result['warnings'] = warnings
wait_for = module.params['wait_for'] or list()
conditionals = [Conditional(c) for c in wait_for]
retries = module.params['retries']
interval = module.params['interval']
match = module.params['match']
while retries > 0:
responses = run_commands(module, commands)
for item in list(conditionals):
if item(responses):
if match == 'any':
conditionals = list()
break
conditionals.remove(item)
if not conditionals:
break
time.sleep(interval)
retries -= 1
if conditionals:
failed_conditions = [item.raw for item in conditionals]
        msg = 'One or more conditional statements have not been satisfied'
module.fail_json(msg=msg, failed_conditions=failed_conditions)
result = {
'changed': False,
'stdout': responses,
'stdout_lines': list(to_lines(responses))
}
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 | -4,892,969,554,987,962,000 | -5,501,151,736,169,676,000 | 29.827869 | 83 | 0.650758 | false |
Nikoala/CouchPotatoServer | libs/pyutil/version_class.py | 106 | 5299 | # -*- coding: utf-8 -*-
# Copyright (c) 2004-2010 Zooko Wilcox-O'Hearn
# This file is part of pyutil; see README.rst for licensing terms.
"""
extended version number class
"""
# verlib a.k.a. distutils.version by Tarek Ziadé.
from pyutil.verlib import NormalizedVersion
def cmp_version(v1, v2):
return cmp(NormalizedVersion(str(v1)), NormalizedVersion(str(v2)))
# Python Standard Library
import re
# End users see version strings like this:
# "1.0.0"
# ^ ^ ^
# | | |
# | | '- micro version number
# | '- minor version number
# '- major version number
# The first number is "major version number". The second number is the "minor
# version number" -- it gets bumped whenever we make a new release that adds or
# changes functionality. The third version is the "micro version number" -- it
# gets bumped whenever we make a new release that doesn't add or change
# functionality, but just fixes bugs (including performance issues).
# Early-adopter end users see version strings like this:
# "1.0.0a1"
# ^ ^ ^^^
# | | |||
# | | ||'- release number
# | | |'- a=alpha, b=beta, c=release candidate, or none
# | | '- micro version number
# | '- minor version number
# '- major version number
# The optional "a" or "b" stands for "alpha release" or "beta release"
# respectively. The number after "a" or "b" gets bumped every time we
# make a new alpha or beta release. This has the same form and the same
# meaning as version numbers of releases of Python.
# Developers see "full version strings", like this:
# "1.0.0a1-55"
# ^ ^ ^^^ ^
# | | ||| |
# | | ||| '- nano version number
# | | ||'- release number
# | | |'- a=alpha, b=beta, c=release candidate or none
# | | '- micro version number
# | '- minor version number
# '- major version number
# or else like this:
# "1.0.0a1-r22155"
# ^ ^ ^^^ ^
# | | ||| |
# | | ||| '- revision number
# | | ||'- release number
# | | |'- a=alpha, b=beta, c=release candidate or none
# | | '- micro version number
# | '- minor version number
# '- major version number
# The presence of the nano version number means that this is a development
# version. There are no guarantees about compatibility, etc. This version is
# considered to be more recent than the version without this field
# (e.g. "1.0.0a1").
# The nano version number or revision number is meaningful only to developers.
# It gets generated automatically from darcs revision control history by
# "darcsver.py". The nano version number is the count of patches that have been
# applied since the last version number tag was applied. The revision number is
# the count of all patches that have been applied in the history.
VERSION_BASE_RE_STR="(\d+)(\.(\d+)(\.(\d+))?)?((a|b|c)(\d+))?(\.dev(\d+))?"
VERSION_SUFFIX_RE_STR="(-(\d+|r\d+)|.post\d+)?"
VERSION_RE_STR=VERSION_BASE_RE_STR + VERSION_SUFFIX_RE_STR
VERSION_RE=re.compile("^" + VERSION_RE_STR + "$")
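# Illustrative strings (assumed) accepted by VERSION_RE: "1.0.0", "1.0.0a1",
# "1.0.0a1-55", "1.0.0a1-r22155" and "1.0.0.post22155".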
class Version(object):
def __init__(self, vstring=None):
self.major = None
self.minor = None
self.micro = None
self.prereleasetag = None
self.prerelease = None
self.nano = None
self.revision = None
if vstring:
try:
self.parse(vstring)
except ValueError, le:
le.args = tuple(le.args + ('vstring:', vstring,))
raise
def parse(self, vstring):
mo = VERSION_RE.search(vstring)
if not mo:
raise ValueError, "Not a valid version string for pyutil.version_class.Version(): %r" % (vstring,)
self.major = int(mo.group(1))
self.minor = mo.group(3) and int(mo.group(3)) or 0
self.micro = mo.group(5) and int(mo.group(5)) or 0
reltag = mo.group(6)
if reltag:
reltagnum = int(mo.group(8))
self.prereleasetag = mo.group(7)
self.prerelease = reltagnum
if mo.group(11):
if mo.group(11)[0] == '-':
if mo.group(12)[0] == 'r':
self.revision = int(mo.group(12)[1:])
else:
self.nano = int(mo.group(12))
else:
assert mo.group(11).startswith('.post'), mo.group(11)
self.revision = int(mo.group(11)[5:])
# XXX in the future, to be compatible with the Python "rational version numbering" scheme, we should move to using .post$REV instead of -r$REV:
# self.fullstr = "%d.%d.%d%s%s" % (self.major, self.minor, self.micro, self.prereleasetag and "%s%d" % (self.prereleasetag, self.prerelease,) or "", self.nano and "-%d" % (self.nano,) or self.revision and ".post%d" % (self.revision,) or "",)
self.fullstr = "%d.%d.%d%s%s" % (self.major, self.minor, self.micro, self.prereleasetag and "%s%d" % (self.prereleasetag, self.prerelease,) or "", self.nano and "-%d" % (self.nano,) or self.revision and "-r%d" % (self.revision,) or "",)
def user_str(self):
return self.full_str()
def full_str(self):
if hasattr(self, 'fullstr'):
return self.fullstr
else:
return 'None'
def __str__(self):
return self.full_str()
def __repr__(self):
return self.__str__()
def __cmp__ (self, other):
return cmp_version(self, other)
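# Minimal usage sketch (assumed reading of parse()): Version("1.0.0a1-r22155")
# yields major=1, minor=0, micro=0, prereleasetag='a', prerelease=1,
# revision=22155, and instances compare via cmp_version/NormalizedVersion.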
| gpl-3.0 | -5,839,275,492,258,434,000 | 1,815,693,967,255,281,200 | 34.783784 | 249 | 0.608384 | false |
bgris/ODL_bgris | lib/python3.5/distutils/tests/test_install_lib.py | 11 | 3934 | """Tests for distutils.command.install_data."""
import sys
import os
import importlib.util
import unittest
from distutils.command.install_lib import install_lib
from distutils.extension import Extension
from distutils.tests import support
from distutils.errors import DistutilsOptionError
from test.support import run_unittest
class InstallLibTestCase(support.TempdirManager,
support.LoggingSilencer,
support.EnvironGuard,
unittest.TestCase):
def test_finalize_options(self):
dist = self.create_dist()[1]
cmd = install_lib(dist)
cmd.finalize_options()
self.assertEqual(cmd.compile, 1)
self.assertEqual(cmd.optimize, 0)
# optimize must be 0, 1, or 2
cmd.optimize = 'foo'
self.assertRaises(DistutilsOptionError, cmd.finalize_options)
cmd.optimize = '4'
self.assertRaises(DistutilsOptionError, cmd.finalize_options)
cmd.optimize = '2'
cmd.finalize_options()
self.assertEqual(cmd.optimize, 2)
@unittest.skipIf(sys.dont_write_bytecode, 'byte-compile disabled')
def test_byte_compile(self):
project_dir, dist = self.create_dist()
os.chdir(project_dir)
cmd = install_lib(dist)
cmd.compile = cmd.optimize = 1
f = os.path.join(project_dir, 'foo.py')
self.write_file(f, '# python file')
cmd.byte_compile([f])
pyc_file = importlib.util.cache_from_source('foo.py', optimization='')
pyc_opt_file = importlib.util.cache_from_source('foo.py',
optimization=cmd.optimize)
self.assertTrue(os.path.exists(pyc_file))
self.assertTrue(os.path.exists(pyc_opt_file))
def test_get_outputs(self):
project_dir, dist = self.create_dist()
os.chdir(project_dir)
os.mkdir('spam')
cmd = install_lib(dist)
# setting up a dist environment
cmd.compile = cmd.optimize = 1
cmd.install_dir = self.mkdtemp()
f = os.path.join(project_dir, 'spam', '__init__.py')
self.write_file(f, '# python package')
cmd.distribution.ext_modules = [Extension('foo', ['xxx'])]
cmd.distribution.packages = ['spam']
cmd.distribution.script_name = 'setup.py'
# get_outputs should return 4 elements: spam/__init__.py and .pyc,
# foo.import-tag-abiflags.so / foo.pyd
outputs = cmd.get_outputs()
self.assertEqual(len(outputs), 4, outputs)
def test_get_inputs(self):
project_dir, dist = self.create_dist()
os.chdir(project_dir)
os.mkdir('spam')
cmd = install_lib(dist)
# setting up a dist environment
cmd.compile = cmd.optimize = 1
cmd.install_dir = self.mkdtemp()
f = os.path.join(project_dir, 'spam', '__init__.py')
self.write_file(f, '# python package')
cmd.distribution.ext_modules = [Extension('foo', ['xxx'])]
cmd.distribution.packages = ['spam']
cmd.distribution.script_name = 'setup.py'
# get_inputs should return 2 elements: spam/__init__.py and
# foo.import-tag-abiflags.so / foo.pyd
inputs = cmd.get_inputs()
self.assertEqual(len(inputs), 2, inputs)
def test_dont_write_bytecode(self):
# makes sure byte_compile is not used
dist = self.create_dist()[1]
cmd = install_lib(dist)
cmd.compile = 1
cmd.optimize = 1
old_dont_write_bytecode = sys.dont_write_bytecode
sys.dont_write_bytecode = True
try:
cmd.byte_compile([])
finally:
sys.dont_write_bytecode = old_dont_write_bytecode
self.assertIn('byte-compiling is disabled', self.logs[0][1])
def test_suite():
return unittest.makeSuite(InstallLibTestCase)
if __name__ == "__main__":
run_unittest(test_suite())
| gpl-3.0 | 5,206,743,437,547,656,000 | -5,028,539,180,285,045,000 | 33.508772 | 78 | 0.608541 | false |
GNOME/hamster-applet | wafadmin/Tools/python.py | 7 | 11127 | #! /usr/bin/env python
# encoding: utf-8
import os,sys
import TaskGen,Utils,Utils,Runner,Options,Build
from Logs import debug,warn,info
from TaskGen import extension,taskgen,before,after,feature
from Configure import conf
EXT_PY=['.py']
FRAG_2='''
#include "Python.h"
#ifdef __cplusplus
extern "C" {
#endif
void Py_Initialize(void);
void Py_Finalize(void);
#ifdef __cplusplus
}
#endif
int main()
{
Py_Initialize();
Py_Finalize();
return 0;
}
'''
def init_pyext(self):
self.default_install_path='${PYTHONDIR}'
self.uselib=self.to_list(getattr(self,'uselib',''))
if not'PYEXT'in self.uselib:
self.uselib.append('PYEXT')
self.env['MACBUNDLE']=True
def pyext_shlib_ext(self):
self.env['shlib_PATTERN']=self.env['pyext_PATTERN']
def init_pyembed(self):
self.uselib=self.to_list(getattr(self,'uselib',''))
if not'PYEMBED'in self.uselib:
self.uselib.append('PYEMBED')
def process_py(self,node):
if not(self.bld.is_install and self.install_path):
return
def inst_py(ctx):
install_pyfile(self,node)
self.bld.add_post_fun(inst_py)
def install_pyfile(self,node):
path=self.bld.get_install_path(self.install_path+os.sep+node.name,self.env)
self.bld.install_files(self.install_path,[node],self.env,self.chmod,postpone=False)
if self.bld.is_install<0:
info("* removing byte compiled python files")
for x in'co':
try:
os.remove(path+x)
except OSError:
pass
if self.bld.is_install>0:
if self.env['PYC']or self.env['PYO']:
info("* byte compiling %r"%path)
if self.env['PYC']:
program=("""
import sys, py_compile
for pyfile in sys.argv[1:]:
py_compile.compile(pyfile, pyfile + 'c')
""")
argv=[self.env['PYTHON'],'-c',program,path]
ret=Utils.pproc.Popen(argv).wait()
if ret:
raise Utils.WafError('bytecode compilation failed %r'%path)
if self.env['PYO']:
program=("""
import sys, py_compile
for pyfile in sys.argv[1:]:
py_compile.compile(pyfile, pyfile + 'o')
""")
argv=[self.env['PYTHON'],self.env['PYFLAGS_OPT'],'-c',program,path]
ret=Utils.pproc.Popen(argv).wait()
if ret:
raise Utils.WafError('bytecode compilation failed %r'%path)
class py_taskgen(TaskGen.task_gen):
def __init__(self,*k,**kw):
TaskGen.task_gen.__init__(self,*k,**kw)
def init_py(self):
self.default_install_path='${PYTHONDIR}'
def _get_python_variables(python_exe,variables,imports=['import sys']):
program=list(imports)
program.append('')
for v in variables:
program.append("print(repr(%s))"%v)
os_env=dict(os.environ)
try:
del os_env['MACOSX_DEPLOYMENT_TARGET']
except KeyError:
pass
proc=Utils.pproc.Popen([python_exe,"-c",'\n'.join(program)],stdout=Utils.pproc.PIPE,env=os_env)
output=proc.communicate()[0].split("\n")
if proc.returncode:
if Options.options.verbose:
warn("Python program to extract python configuration variables failed:\n%s"%'\n'.join(["line %03i: %s"%(lineno+1,line)for lineno,line in enumerate(program)]))
raise RuntimeError
return_values=[]
for s in output:
s=s.strip()
if not s:
continue
if s=='None':
return_values.append(None)
elif s[0]=="'"and s[-1]=="'":
return_values.append(s[1:-1])
elif s[0].isdigit():
return_values.append(int(s))
else:break
return return_values
def check_python_headers(conf,mandatory=True):
if not conf.env['CC_NAME']and not conf.env['CXX_NAME']:
conf.fatal('load a compiler first (gcc, g++, ..)')
if not conf.env['PYTHON_VERSION']:
conf.check_python_version()
env=conf.env
python=env['PYTHON']
if not python:
conf.fatal('could not find the python executable')
if Options.platform=='darwin':
conf.check_tool('osx')
try:
v='prefix SO SYSLIBS LDFLAGS SHLIBS LIBDIR LIBPL INCLUDEPY Py_ENABLE_SHARED MACOSX_DEPLOYMENT_TARGET'.split()
(python_prefix,python_SO,python_SYSLIBS,python_LDFLAGS,python_SHLIBS,python_LIBDIR,python_LIBPL,INCLUDEPY,Py_ENABLE_SHARED,python_MACOSX_DEPLOYMENT_TARGET)=_get_python_variables(python,["get_config_var('%s')"%x for x in v],['from distutils.sysconfig import get_config_var'])
except RuntimeError:
conf.fatal("Python development headers not found (-v for details).")
conf.log.write("""Configuration returned from %r:
python_prefix = %r
python_SO = %r
python_SYSLIBS = %r
python_LDFLAGS = %r
python_SHLIBS = %r
python_LIBDIR = %r
python_LIBPL = %r
INCLUDEPY = %r
Py_ENABLE_SHARED = %r
MACOSX_DEPLOYMENT_TARGET = %r
"""%(python,python_prefix,python_SO,python_SYSLIBS,python_LDFLAGS,python_SHLIBS,python_LIBDIR,python_LIBPL,INCLUDEPY,Py_ENABLE_SHARED,python_MACOSX_DEPLOYMENT_TARGET))
if python_MACOSX_DEPLOYMENT_TARGET:
conf.env['MACOSX_DEPLOYMENT_TARGET']=python_MACOSX_DEPLOYMENT_TARGET
conf.environ['MACOSX_DEPLOYMENT_TARGET']=python_MACOSX_DEPLOYMENT_TARGET
env['pyext_PATTERN']='%s'+python_SO
if python_SYSLIBS is not None:
for lib in python_SYSLIBS.split():
if lib.startswith('-l'):
lib=lib[2:]
env.append_value('LIB_PYEMBED',lib)
if python_SHLIBS is not None:
for lib in python_SHLIBS.split():
if lib.startswith('-l'):
env.append_value('LIB_PYEMBED',lib[2:])
else:
env.append_value('LINKFLAGS_PYEMBED',lib)
if Options.platform!='darwin'and python_LDFLAGS:
env.append_value('LINKFLAGS_PYEMBED',python_LDFLAGS.split())
result=False
name='python'+env['PYTHON_VERSION']
if python_LIBDIR is not None:
path=[python_LIBDIR]
conf.log.write("\n\n# Trying LIBDIR: %r\n"%path)
result=conf.check(lib=name,uselib='PYEMBED',libpath=path)
if not result and python_LIBPL is not None:
conf.log.write("\n\n# try again with -L$python_LIBPL (some systems don't install the python library in $prefix/lib)\n")
path=[python_LIBPL]
result=conf.check(lib=name,uselib='PYEMBED',libpath=path)
if not result:
conf.log.write("\n\n# try again with -L$prefix/libs, and pythonXY name rather than pythonX.Y (win32)\n")
path=[os.path.join(python_prefix,"libs")]
name='python'+env['PYTHON_VERSION'].replace('.','')
result=conf.check(lib=name,uselib='PYEMBED',libpath=path)
if result:
env['LIBPATH_PYEMBED']=path
env.append_value('LIB_PYEMBED',name)
else:
conf.log.write("\n\n### LIB NOT FOUND\n")
if(sys.platform=='win32'or sys.platform.startswith('os2')or sys.platform=='darwin'or Py_ENABLE_SHARED):
env['LIBPATH_PYEXT']=env['LIBPATH_PYEMBED']
env['LIB_PYEXT']=env['LIB_PYEMBED']
python_config=conf.find_program('python%s-config'%('.'.join(env['PYTHON_VERSION'].split('.')[:2])),var='PYTHON_CONFIG')
if not python_config:
python_config=conf.find_program('python-config-%s'%('.'.join(env['PYTHON_VERSION'].split('.')[:2])),var='PYTHON_CONFIG')
includes=[]
if python_config:
for incstr in Utils.cmd_output("%s %s --includes"%(python,python_config)).strip().split():
if(incstr.startswith('-I')or incstr.startswith('/I')):
incstr=incstr[2:]
if incstr not in includes:
includes.append(incstr)
conf.log.write("Include path for Python extensions ""(found via python-config --includes): %r\n"%(includes,))
env['CPPPATH_PYEXT']=includes
env['CPPPATH_PYEMBED']=includes
else:
conf.log.write("Include path for Python extensions ""(found via distutils module): %r\n"%(INCLUDEPY,))
env['CPPPATH_PYEXT']=[INCLUDEPY]
env['CPPPATH_PYEMBED']=[INCLUDEPY]
if env['CC_NAME']=='gcc':
env.append_value('CCFLAGS_PYEMBED','-fno-strict-aliasing')
env.append_value('CCFLAGS_PYEXT','-fno-strict-aliasing')
if env['CXX_NAME']=='gcc':
env.append_value('CXXFLAGS_PYEMBED','-fno-strict-aliasing')
env.append_value('CXXFLAGS_PYEXT','-fno-strict-aliasing')
conf.check(define_name='HAVE_PYTHON_H',uselib='PYEMBED',fragment=FRAG_2,errmsg='Could not find the python development headers',mandatory=mandatory)
def check_python_version(conf,minver=None):
assert minver is None or isinstance(minver,tuple)
python=conf.env['PYTHON']
if not python:
conf.fatal('could not find the python executable')
cmd=[python,"-c","import sys\nfor x in sys.version_info: print(str(x))"]
debug('python: Running python command %r'%cmd)
proc=Utils.pproc.Popen(cmd,stdout=Utils.pproc.PIPE)
lines=proc.communicate()[0].split()
assert len(lines)==5,"found %i lines, expected 5: %r"%(len(lines),lines)
pyver_tuple=(int(lines[0]),int(lines[1]),int(lines[2]),lines[3],int(lines[4]))
result=(minver is None)or(pyver_tuple>=minver)
if result:
pyver='.'.join([str(x)for x in pyver_tuple[:2]])
conf.env['PYTHON_VERSION']=pyver
if'PYTHONDIR'in conf.environ:
pydir=conf.environ['PYTHONDIR']
else:
if sys.platform=='win32':
(python_LIBDEST,pydir)=_get_python_variables(python,["get_config_var('LIBDEST')","get_python_lib(standard_lib=0, prefix=%r)"%conf.env['PREFIX']],['from distutils.sysconfig import get_config_var, get_python_lib'])
else:
python_LIBDEST=None
(pydir,)=_get_python_variables(python,["get_python_lib(standard_lib=0, prefix=%r)"%conf.env['PREFIX']],['from distutils.sysconfig import get_config_var, get_python_lib'])
if python_LIBDEST is None:
if conf.env['LIBDIR']:
python_LIBDEST=os.path.join(conf.env['LIBDIR'],"python"+pyver)
else:
python_LIBDEST=os.path.join(conf.env['PREFIX'],"lib","python"+pyver)
if hasattr(conf,'define'):
conf.define('PYTHONDIR',pydir)
conf.env['PYTHONDIR']=pydir
pyver_full='.'.join(map(str,pyver_tuple[:3]))
if minver is None:
conf.check_message_custom('Python version','',pyver_full)
else:
minver_str='.'.join(map(str,minver))
conf.check_message('Python version',">= %s"%minver_str,result,option=pyver_full)
if not result:
conf.fatal('The python version is too old (%r)'%pyver_full)
def check_python_module(conf,module_name):
result=not Utils.pproc.Popen([conf.env['PYTHON'],"-c","import %s"%module_name],stderr=Utils.pproc.PIPE,stdout=Utils.pproc.PIPE).wait()
conf.check_message('Python module',module_name,result)
if not result:
conf.fatal('Could not find the python module %r'%module_name)
def detect(conf):
if not conf.env.PYTHON:
conf.env.PYTHON=sys.executable
python=conf.find_program('python',var='PYTHON')
if not python:
conf.fatal('Could not find the path of the python executable')
v=conf.env
v['PYCMD']='"import sys, py_compile;py_compile.compile(sys.argv[1], sys.argv[2])"'
v['PYFLAGS']=''
v['PYFLAGS_OPT']='-O'
v['PYC']=getattr(Options.options,'pyc',1)
v['PYO']=getattr(Options.options,'pyo',1)
def set_options(opt):
opt.add_option('--nopyc',action='store_false',default=1,help='Do not install bytecode compiled .pyc files (configuration) [Default:install]',dest='pyc')
opt.add_option('--nopyo',action='store_false',default=1,help='Do not install optimised compiled .pyo files (configuration) [Default:install]',dest='pyo')
before('apply_incpaths','apply_lib_vars','apply_type_vars')(init_pyext)
feature('pyext')(init_pyext)
before('apply_bundle')(init_pyext)
before('apply_link','apply_lib_vars','apply_type_vars')(pyext_shlib_ext)
after('apply_bundle')(pyext_shlib_ext)
feature('pyext')(pyext_shlib_ext)
before('apply_incpaths','apply_lib_vars','apply_type_vars')(init_pyembed)
feature('pyembed')(init_pyembed)
extension(EXT_PY)(process_py)
before('apply_core')(init_py)
after('vars_target_cprogram','vars_target_cshlib')(init_py)
feature('py')(init_py)
conf(check_python_headers)
conf(check_python_version)
conf(check_python_module)
| gpl-3.0 | 3,696,898,263,588,615,700 | 3,305,208,317,367,711,000 | 39.02518 | 276 | 0.709176 | false |
cbrepo/celery | celery/tests/test_utils/test_utils_info.py | 14 | 1112 | from __future__ import absolute_import
from celery import Celery
from celery.utils import textindent
from celery.tests.utils import Case
RANDTEXT = """\
The quick brown
fox jumps
over the
lazy dog\
"""
RANDTEXT_RES = """\
The quick brown
fox jumps
over the
lazy dog\
"""
QUEUES = {"queue1": {
"exchange": "exchange1",
"exchange_type": "type1",
"binding_key": "bind1"},
"queue2": {
"exchange": "exchange2",
"exchange_type": "type2",
"binding_key": "bind2"}}
QUEUE_FORMAT1 = """. queue1: exchange:exchange1 (type1) binding:bind1"""
QUEUE_FORMAT2 = """. queue2: exchange:exchange2 (type2) binding:bind2"""
class TestInfo(Case):
def test_textindent(self):
self.assertEqual(textindent(RANDTEXT, 4), RANDTEXT_RES)
def test_format_queues(self):
celery = Celery(set_as_current=False)
celery.amqp.queues = celery.amqp.Queues(QUEUES)
self.assertEqual(sorted(celery.amqp.queues.format().split("\n")),
sorted([QUEUE_FORMAT1, QUEUE_FORMAT2]))
| bsd-3-clause | 7,985,096,494,223,075,000 | 3,792,984,102,071,539,700 | 24.272727 | 77 | 0.606115 | false |
asterisk/testsuite | lib/python/asterisk/pluggable_modules.py | 1 | 38856 | """Generic pluggable modules
Copyright (C) 2012, Digium, Inc.
Kinsey Moore <[email protected]>
This program is free software, distributed under the terms of
the GNU General Public License Version 2.
"""
import os
import sys
import logging
import shutil
import re
sys.path.append("lib/python")
from .ami import AMIEventInstance
from twisted.internet import reactor
from starpy import fastagi
from .test_runner import load_and_parse_module
from .pluggable_registry import PLUGGABLE_ACTION_REGISTRY,\
PLUGGABLE_EVENT_REGISTRY,\
PluggableRegistry
from . import matcher
LOGGER = logging.getLogger(__name__)
class Originator(object):
"""Pluggable module class that originates calls in Asterisk"""
def __init__(self, module_config, test_object):
"""Initialize config and register test_object callbacks."""
self.ami = None
test_object.register_ami_observer(self.ami_connect)
self.test_object = test_object
self.current_destination = 0
self.ami_callback = None
self.scenario_count = 0
self.config = {
'channel': 'Local/s@default',
'application': 'Echo',
'data': '',
'context': '',
'exten': '',
'priority': '',
'ignore-originate-failure': 'no',
'trigger': 'scenario_start',
'scenario-trigger-after': None,
'scenario-name': None,
'id': '0',
'account': None,
'async': 'False',
'event': None,
'timeout': None,
'codecs': None,
}
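        # Illustrative module configuration (hypothetical values) as passed in
        # module_config:
        #   trigger: 'ami_connect'
        #   channel: 'PJSIP/alice'
        #   context: 'default'
        #   exten: 's'
        #   priority: '1'
        # Keys not present in self.config above are ignored; listed keys
        # override the defaults.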
# process config
if not module_config:
return
for k in module_config.keys():
if k in self.config:
self.config[k] = module_config[k]
if self.config['trigger'] == 'scenario_start':
if (self.config['scenario-trigger-after'] is not None and
self.config['scenario-name'] is not None):
LOGGER.error("Conflict between 'scenario-trigger-after' and "
"'scenario-name'. Only one may be used.")
raise Exception
else:
test_object.register_scenario_started_observer(
self.scenario_started)
elif self.config['trigger'] == 'event':
if not self.config['event']:
LOGGER.error("Event specifier for trigger type 'event' is "
"missing")
raise Exception
# set id to the AMI id for the origination if it is unset
if 'id' not in self.config['event']:
self.config['event']['id'] = self.config['id']
callback = AMIPrivateCallbackInstance(self.config['event'],
test_object,
self.originate_callback)
self.ami_callback = callback
return
def ami_connect(self, ami):
"""Handle new AMI connections."""
LOGGER.info("AMI %s connected", str(ami.id))
if str(ami.id) == self.config['id']:
self.ami = ami
if self.config['trigger'] == 'ami_connect':
self.originate_call()
return
def failure(self, result):
"""Handle origination failure."""
if self.config['ignore-originate-failure'] == 'no':
LOGGER.info("Originate failed: %s", str(result))
self.test_object.set_passed(False)
return None
def originate_callback(self, ami, event):
"""Handle event callbacks."""
LOGGER.info("Got event callback for Origination")
self.originate_call()
return True
def originate_call(self):
"""Originate the call"""
LOGGER.info("Originating call")
defer = None
if len(self.config['context']) > 0:
defer = self.ami.originate(channel=self.config['channel'],
context=self.config['context'],
exten=self.config['exten'],
priority=self.config['priority'],
timeout=self.config['timeout'],
account=self.config['account'],
codecs=self.config['codecs'],
async=self.config['async'])
else:
defer = self.ami.originate(channel=self.config['channel'],
application=self.config['application'],
data=self.config['data'],
timeout=self.config['timeout'],
account=self.config['account'],
codecs=self.config['codecs'],
async=self.config['async'])
defer.addErrback(self.failure)
def scenario_started(self, result):
"""Handle origination on scenario start if configured to do so."""
LOGGER.info("Scenario '%s' started", result.name)
if self.config['scenario-name'] is not None:
if result.name == self.config['scenario-name']:
LOGGER.debug("Scenario name '%s' matched", result.name)
self.originate_call()
elif self.config['scenario-trigger-after'] is not None:
self.scenario_count += 1
trigger_count = int(self.config['scenario-trigger-after'])
if self.scenario_count == trigger_count:
LOGGER.debug("Scenario count has been met")
self.originate_call()
else:
self.originate_call()
return result
class AMIPrivateCallbackInstance(AMIEventInstance):
"""Subclass of AMIEventInstance that operates by calling a user-defined
callback function. The callback function returns the current disposition
of the test (i.e. whether the test is currently passing or failing).
"""
def __init__(self, instance_config, test_object, callback):
"""Constructor"""
super(AMIPrivateCallbackInstance, self).__init__(instance_config,
test_object)
self.callback = callback
if 'start' in instance_config:
self.passed = True if instance_config['start'] == 'pass' else False
def event_callback(self, ami, event):
"""Generic AMI event handler"""
self.passed = self.callback(ami, event)
return (ami, event)
def check_result(self, callback_param):
"""Set the test status based on the result of self.callback"""
self.test_object.set_passed(self.passed)
return callback_param
class AMIChannelHangup(AMIEventInstance):
"""An AMIEventInstance derived class that hangs up a channel when an
event is matched."""
def __init__(self, instance_config, test_object):
"""Constructor for pluggable modules"""
super(AMIChannelHangup, self).__init__(instance_config, test_object)
self.hungup_channel = False
self.delay = instance_config.get('delay') or 0
def event_callback(self, ami, event):
"""Override of the event callback"""
if self.hungup_channel:
return
if 'channel' not in event:
return
LOGGER.info("Hanging up channel %s", event['channel'])
self.hungup_channel = True
reactor.callLater(self.delay, ami.hangup, event['channel'])
return (ami, event)
class AMIChannelHangupAll(AMIEventInstance):
"""An AMIEventInstance derived class that hangs up all the channels when
an event is matched."""
def __init__(self, instance_config, test_object):
"""Constructor for pluggable modules"""
super(AMIChannelHangupAll, self).__init__(instance_config, test_object)
test_object.register_ami_observer(self.__ami_connect)
self.channels = []
def __ami_connect(self, ami):
"""AMI connect handler"""
if str(ami.id) in self.ids:
ami.registerEvent('Newchannel', self.__new_channel_handler)
ami.registerEvent('Hangup', self.__hangup_handler)
def __new_channel_handler(self, ami, event):
"""New channel event handler"""
self.channels.append({'id': ami.id, 'channel': event['channel']})
def __hangup_handler(self, ami, event):
"""Hangup event handler"""
objects = [x for x in self.channels if
(x['id'] == ami.id and
x['channel'] == event['channel'])]
for obj in objects:
self.channels.remove(obj)
def event_callback(self, ami, event):
"""Override of the event callback"""
def __hangup_ignore(result):
"""Ignore hangup errors"""
# Ignore hangup errors - if the channel is gone, we don't care
return result
objects = [x for x in self.channels if x['id'] == ami.id]
for obj in objects:
LOGGER.info("Hanging up channel %s", obj['channel'])
ami.hangup(obj['channel']).addErrback(__hangup_ignore)
self.channels.remove(obj)
class ARIHangupMonitor(object):
"""A class that monitors for new channels and hungup channels in ARI.
This is the same as HangupMonitor, except that it listens over ARI
to avoid any issue with race conditions. Note that it will implicitly
create a global subscription to channels, which may conflict with
tests that don't expect to get all those events.
"""
def __init__(self, instance_config, test_object):
"""Constructor"""
super(ARIHangupMonitor, self).__init__()
self.delay = 0
if 'delay-stop' in instance_config:
self.delay = instance_config['delay-stop']
self.test_object = test_object
self.test_object.register_ari_observer(self._handle_ws_open)
self.test_object.register_ws_event_handler(self._handle_ws_event)
self.channels = 0
def _handle_ws_open(self, ari_receiver):
"""Handle WS connection"""
LOGGER.info(ari_receiver.apps)
for app in ari_receiver.apps.split(','):
self.test_object.ari.post('applications/{0}/subscription?eventSource=channel:'.format(app))
def _handle_ws_event(self, message):
"""Handle a message received over the WS"""
message_type = message.get('type')
if (message_type == 'ChannelCreated'):
LOGGER.info('Tracking channel %s', message.get('channel'))
self.channels += 1
elif (message_type == 'ChannelDestroyed'):
LOGGER.info('Destroyed channel %s', message.get('channel'))
self.channels -= 1
if (self.channels == 0):
LOGGER.info("All channels have hungup; stopping test after %d seconds",
self.delay)
reactor.callLater(self.delay, self.test_object.stop_reactor)
class HangupMonitor(object):
"""A class that monitors for new channels and hungup channels. When all
channels it has monitored for have hung up, it ends the test.
Essentially, as long as there are new channels it will keep the test
going; however, once channels start hanging up it will kill the test
on the last hung up channel.
"""
def __init__(self, instance_config, test_object):
"""Constructor for pluggable modules"""
super(HangupMonitor, self).__init__()
self.config = instance_config
self.test_object = test_object
self.test_object.register_ami_observer(self.__ami_connect)
self.channels = []
self.num_calls = 0
def __ami_connect(self, ami):
"""AMI connect handler"""
if str(ami.id) in self.config["ids"]:
ami.registerEvent('Newchannel', self.__new_channel_handler)
ami.registerEvent('Rename', self.__rename_handler)
ami.registerEvent('Hangup', self.__hangup_handler)
def __new_channel_handler(self, ami, event):
"""Handler for the Newchannel event"""
LOGGER.debug("Tracking channel %s", event['channel'])
self.channels.append(event['channel'])
return (ami, event)
def __hangup_handler(self, ami, event):
"""Handler for the Hangup event"""
LOGGER.debug("Channel %s hungup", event['channel'])
self.channels.remove(event['channel'])
self.num_calls += 1
if 'min_calls' in self.config \
and self.num_calls < self.config["min_calls"]:
return (ami, event)
if len(self.channels) == 0:
LOGGER.info("All channels have hungup; stopping test")
self.stop_test()
return (ami, event)
def __rename_handler(self, ami, event):
LOGGER.debug("Channel {0} renamed to {1}".format(event['channel'],
event['newname']))
self.channels.append(event['newname'])
self.channels.remove(event['channel'])
def stop_test(self):
"""Allow subclasses to take different actions to stop the test."""
self.test_object.stop_reactor()
class CallFiles(object):
""" This class allows call files to be created from a YAML configuration"""
def __init__(self, instance_config, test_object):
"""Constructor"""
super(CallFiles, self).__init__()
self.test_object = test_object
self.call_file_instances = instance_config
self.locale = ""
if self.call_file_instances:
self.test_object.register_ami_observer(self.ami_connect)
else:
LOGGER.error("No configuration was specified for call files")
self.test_failed()
def test_failed(self):
"""Checks to see whether or not the call files were
correctly specified """
self.test_object.set_passed(False)
self.test_object.stop_reactor()
def write_call_file(self, call_file_num, call_file):
"""Write out the specified call file
Keyword Parameters:
call_file_num Which call file in the test we're writing out
call_file A dictionary containing the call file
information, derived from the YAML
"""
params = call_file.get('call-file-params')
if not params:
LOGGER.error("No call file parameters specified")
self.test_failed()
return
self.locale = ("%s%s/tmp/test%d.call" %
(self.test_object.ast[int(call_file['id'])].base,
self.test_object.ast[int(call_file['id'])].directories
["astspooldir"], call_file_num))
with open(self.locale, 'w') as outfile:
for key, value in params.items():
outfile.write("%s: %s\n" % (key, value))
LOGGER.debug("Wrote call file to %s", self.locale)
self.move_file(call_file_num, call_file)
def ami_connect(self, ami):
"""Handler for AMI connection """
for index, call_file in enumerate(self.call_file_instances):
if ami.id == int(call_file.get('id')):
self.write_call_file(index, call_file)
def move_file(self, call_file_num, call_file):
"""Moves call files to astspooldir directory to be run """
src_file = self.locale
dst_file = ("%s%s/outgoing/test%s.call" %
(self.test_object.ast[int(call_file['id'])].base,
self.test_object.ast[int(call_file['id'])].directories
["astspooldir"], call_file_num))
LOGGER.info("Moving file %s to %s", src_file, dst_file)
shutil.move(src_file, dst_file)
os.utime(dst_file, None)
class SoundChecker(object):
""" This class allows the user to check if a given sound file exists,
whether a sound file fits within a range of file size, and has enough
energy in it to pass a BackgroundDetect threshold of silence"""
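    # Illustrative sound-file-config entry (hypothetical values):
    #   -
    #       id: 0
    #       trigger:
    #           match:
    #               event: 'UserEvent'
    #               userevent: 'recording_done'
    #       sound-file:
    #           file-name: 'recording.wav'
    #           file-path-type: 'relative'
    #           actions:
    #               -
    #                   type: 'size_check'
    #                   size: 40000
    #                   tolerance: 5000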
def __init__(self, module_config, test_object):
"""Constructor"""
super(SoundChecker, self).__init__()
self.test_object = test_object
self.module_config = module_config['sound-file-config']
self.filepath = ""
self.sound_file = {}
self.actions = []
self.index = 0
self.action_index = 0
self.auto_stop = module_config.get('auto-stop', False)
self.test_object.register_ami_observer(self.ami_connect)
def build_sound_file_location(self, filename, path_type, path_name=""):
"""Creates the filepath for the given sound file.
        Valid file path types are 'relative' and 'absolute'; an absolute path
        also requires the path_name argument (taken from the 'absolute-path'
        config option). Fails if the path type is invalid or parameters are
        missing.
Keyword Arguments:
        filename: The name of the file to be set and used
        path_type: The type of file path - either relative or absolute
        path_name: Optional parameter that must be included with an
                   absolute path_type. It stores the actual file path to be
                   used
returns:
filepath: The filepath that this sound_file test will use.
"""
asterisk_instance = self.module_config[self.index].get('id', 0)
if path_type == 'relative':
ast_instance = self.test_object.ast[asterisk_instance]
base_path = ast_instance.base
spool_dir = ast_instance.directories["astspooldir"]
filepath = ("%s%s/%s" % (base_path, spool_dir, filename))
return filepath
elif path_type == 'absolute':
if path_name:
filepath = "%s/%s" % (path_name, filename)
return filepath
else:
raise Exception("No absolute path specified")
else:
raise Exception("Invalid file path type or undefined path type")
def size_check(self, ami):
"""The size range test.
Checks whether the size of the file meets a certain threshold of
byte size. Fails if it doesn't. Iterates action_index so that the
next action can be done.
Keyword Arguments:
ami- the AMI instance used by this test, not used by this function
but needs to be passed into sound_check_actions to continue
"""
filesize = -1
filesize = os.path.getsize(self.filepath)
size = self.actions[self.action_index].get('size')
tolerance = self.actions[self.action_index].get('tolerance')
if ((filesize - size) > tolerance) or ((size - filesize) > tolerance):
LOGGER.error("""File '%s' failed size check: expected %d, actual %d
                         (tolerance +/- %d)""" % (
self.filepath, size, filesize, tolerance))
self.test_object.set_passed(False)
if self.auto_stop:
self.test_object.stop_reactor()
return
else:
self.action_index += 1
self.sound_check_actions(ami)
def energy_check(self, ami):
"""Checks the energy levels of a given sound file.
This is done by creating a local channel into a dialplan extension
that does a BackgroundDetect on the sound file. The extensions must
be defined by the user.
Keyword Arguments:
ami- the AMI instance used by this test
"""
energyfile = self.filepath[:self.filepath.find('.')]
action = self.actions[self.action_index]
#ami.originate has no type var, so action['type'] has to be popped
action.pop('type')
action['variable'] = {'SOUNDFILE': energyfile}
ami.registerEvent("UserEvent", self.verify_presence)
dfr = ami.originate(**action)
dfr.addErrback(self.test_object.handle_originate_failure)
def sound_check_actions(self, ami):
"""The second, usually larger part of the sound check.
Iterates through the actions that will be used to check various
aspects of the given sound file. Waits for the output of the action
functions before continuing. If all actions have been completed resets
the test to register for a new event as defined in the triggers. If
all sound-file tests have been finished, sets the test to passed.
Keyword Arguments:
ami- the AMI instance used by this test
"""
if self.action_index == len(self.actions):
self.action_index = 0
self.index += 1
if self.index == len(self.module_config):
LOGGER.info("Test successfully passed")
self.test_object.set_passed(True)
if self.auto_stop:
self.test_object.stop_reactor()
else:
self.event_register(ami)
else:
actiontype = self.actions[self.action_index]['type']
if actiontype == 'size_check':
self.size_check(ami)
elif actiontype == 'energy_check':
self.energy_check(ami)
def verify_presence(self, ami, event):
"""UserEvent verifier for the energy check.
Verifies that the userevent that was given off by the dialplan
extension called in energy_check was a soundcheck userevent and that
the status is pass. Fails if the status was not pass. Iterates
action_index if it passed so that the next action can be done.
Keyword Arguments:
ami- the AMI instance used by this test
event- the event (Userevent) being picked up by the AMI that
determines whether a correct amount of energy has been detected.
"""
userevent = event.get("userevent")
if not userevent:
return
if userevent.lower() != "soundcheck":
return
LOGGER.info("Checking the sound check userevent")
ami.deregisterEvent("UserEvent", self.verify_presence)
status = event.get("status")
LOGGER.debug("Status of the sound check is " + status)
if status != "pass":
LOGGER.error("The sound check wasn't successful- test failed")
self.test_object.set_passed(False)
if self.auto_stop:
self.test_object.stop_reactor()
return
else:
self.action_index += 1
self.sound_check_actions(ami)
def sound_check_start(self, ami, event):
"""The first part of the sound_check test. Required.
It deregisters the prerequisite event as defined in triggers so that
it doesn't keep looking for said events. Then it checks whether the
sound file described in the YAML exists by looking for the file with
the given path. The filepath is determined by calling
build_sound_file_location. After this initial part of sound_check,
the remaining actions are then called.
Keyword Arguments:
ami- the AMI instance used by this test
event- the event (defined by the triggers section) being picked up by
the AMI that allows the rest of the pluggable module to be accessed
"""
config = self.module_config[self.index]
instance_id = config.get('id', 0)
if ami.id != instance_id:
return
current_trigger = config['trigger']['match']
for key, value in current_trigger.items():
if key.lower() not in event:
LOGGER.debug("Condition %s not in event, returning", key)
return
if not re.match(value, event.get(key.lower())):
LOGGER.debug("Condition %s: %s does not match %s: %s in event",
key, value, key, event.get(key.lower()))
return
else:
LOGGER.debug("Condition %s: %s matches %s: %s in event",
key, value, key, event.get(key.lower()))
ami.deregisterEvent(current_trigger.get('event'),
self.sound_check_start)
self.sound_file = config['sound-file']
if not self.sound_file:
raise Exception("No sound file parameters specified")
if (not self.sound_file.get('file-name')
or not self.sound_file.get('file-path-type')):
raise Exception("No file or file path type specified")
if self.sound_file.get('absolute-path'):
file_name = self.sound_file['file-name']
file_path_type = self.sound_file['file-path-type']
absolute_path = self.sound_file['absolute-path']
self.filepath = self.build_sound_file_location(file_name,
file_path_type,
absolute_path)
else:
file_name = self.sound_file['file-name']
file_path_type = self.sound_file['file-path-type']
self.filepath = self.build_sound_file_location(file_name,
file_path_type)
        # Verify that the sound file exists before running any checks on it
if not os.path.exists(self.filepath):
LOGGER.error("File '%s' does not exist!" % self.filepath)
self.test_object.set_passed(False)
if self.auto_stop:
self.test_object.stop_reactor()
return
self.actions = self.sound_file.get('actions')
self.sound_check_actions(ami)
def event_register(self, ami):
"""Event register for the prerequisite event.
Starts looking for the event defined in the triggers section of the
YAML that allows the rest of the test to be accessed.
Keyword Arguments:
ami- the AMI instance used by this test
"""
current_trigger = self.module_config[self.index]['trigger']['match']
trigger_id = self.module_config[self.index]['trigger'].get('id', 0)
if ami.id != trigger_id:
return
if not current_trigger:
raise Exception("Missing a trigger")
else:
ami.registerEvent(current_trigger.get('event'),
self.sound_check_start)
def ami_connect(self, ami):
"""Starts the ami_connection and then calls event_register
Keyword Arguments:
ami- the AMI instance used by this test
"""
self.event_register(ami)
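# Editor's sketch, not part of the original module: a rough guess at the YAML
# shape this sound-check module consumes, written as the Python structure it is
# parsed into. Key names follow the accessors used above (trigger/match,
# sound-file, file-name, file-path-type, absolute-path, actions); all values
# are made-up placeholders.
_EXAMPLE_SOUND_CHECK_CONFIG = [{
    'id': 0,
    'trigger': {
        'id': 0,
        'match': {'event': 'TestEvent'},
    },
    'sound-file': {
        'file-name': 'recording_example',
        'file-path-type': 'absolute',
        'absolute-path': '/tmp/asterisk-sounds',
        'actions': [
            {'type': 'size_check', 'size': 80000, 'tolerance': 500},
        ],
    },
}]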
class AsteriskConfigModule(object):
"""A pluggable module that installs an Asterisk config file.
Configuration is as follows:
config-section:
-
id: 0
src: tests/my-test/my-super-awesome.conf
dst: extensions.conf
"""
def __init__(self, instance_config, test_object):
"""Constructor for pluggable modules"""
super(AsteriskConfigModule, self).__init__()
for info in instance_config:
asterisk_instance = test_object.ast[info.get('id', 0)]
asterisk_instance.install_config(info['src'], info['dst'])
class FastAGIModule(object):
"""A class that makes a FastAGI server available to be called via the
dialplan and allows simple commands to be executed.
Configuration is as follows:
config-section:
host: '127.0.0.1'
port: 4573
commands:
- 'SET VARIABLE "CHANVAR1" "CHANVAL1"'
Instead of commands, a callback may be specified to interact with Asterisk:
callback:
module: fast_agi_callback_module
method: fast_agi_callback_method
"""
def __init__(self, instance_config, test_object):
"""Constructor for pluggable modules"""
super(FastAGIModule, self).__init__()
self.test_object = test_object
self.port = instance_config.get('port', 4573)
self.host = instance_config.get('host', '127.0.0.1')
self.commands = instance_config.get('commands')
if 'callback' in instance_config:
self.callback_module = instance_config['callback']['module']
self.callback_method = instance_config['callback']['method']
fastagi_factory = fastagi.FastAGIFactory(self.fastagi_connect)
reactor.listenTCP(self.port, fastagi_factory,
test_object.reactor_timeout, self.host)
def fastagi_connect(self, agi):
"""Handle incoming connections"""
if self.commands:
return self.execute_command(agi, 0)
else:
method = load_and_parse_module(self.callback_module + '.' + self.callback_method)
method(self.test_object, agi)
def on_command_failure(self, reason, agi, idx):
"""Failure handler for executing commands"""
LOGGER.error('Could not execute command %s: %s',
idx, self.commands[idx])
LOGGER.error(reason.getTraceback())
agi.finish()
def on_command_success(self, result, agi, idx):
"""Handler for executing commands"""
LOGGER.debug("Successfully executed '%s': %s",
self.commands[idx], result)
self.execute_command(agi, idx + 1)
def execute_command(self, agi, idx):
"""Execute the requested command"""
if len(self.commands) <= idx:
LOGGER.debug("Completed all commands for %s:%s",
self.host, self.port)
agi.finish()
return
agi.sendCommand(self.commands[idx])\
.addCallback(self.on_command_success, agi, idx)\
.addErrback(self.on_command_failure, agi, idx)
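# Editor's sketch, not part of the original module: the shape of a user-supplied
# FastAGI callback. fastagi_connect() above invokes it as method(test_object, agi),
# so a callback module only needs a function with this signature; the body here is
# a made-up illustration.
def example_fast_agi_callback(test_object, agi):
    """Finish the AGI session immediately; replace with real test logic."""
    return agi.finish()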
class EventActionModule(object):
"""A class that links arbitrary events with one or more actions.
Configuration is as follows:
config-section:
actions:
custom-action-name: custom.action.location
events:
custom-event-name: custom.event.location
mapping:
-
custom-event-name:
event-config-goes-here
custom-action-name:
action-config-goes-here
Or if no locally-defined events or actions are desired:
config-section:
-
event-name:
event-config-goes-here
other-event-name:
event-config-goes-here
action-name:
action-config-goes-here
Or if no locally-defined events or actions are desired and only one set is
desired:
config-section:
event-name:
event-config-goes-here
action-name:
action-config-goes-here
Any event in a set will trigger all actions in a set.
"""
def __init__(self, instance_config, test_object):
"""Constructor for pluggable modules"""
super(EventActionModule, self).__init__()
self.test_object = test_object
config = instance_config
if isinstance(config, list):
config = {"mapping": config}
elif isinstance(config, dict) and "mapping" not in config:
config = {"mapping": [config]}
# Parse out local action and event definitions
self.local_action_registry = PluggableRegistry()
self.local_event_registry = PluggableRegistry()
def register_modules(config, registry):
"""Register pluggable modules into the registry"""
for key, local_class_path in config.items():
local_class = load_and_parse_module(local_class_path)
if not local_class:
raise Exception("Unable to load %s for module key %s"
% (local_class_path, key))
registry.register(key, local_class)
if "actions" in config:
register_modules(config["actions"], self.local_action_registry)
if "events" in config:
register_modules(config["events"], self.local_event_registry)
self.event_action_sets = []
self.parse_mapping(config)
def parse_mapping(self, config):
"""Parse out the mapping and instantiate objects."""
for e_a_set in config["mapping"]:
plug_set = {"events": [], "actions": []}
for plug_name, plug_config in e_a_set.items():
self.parse_module_config(plug_set, plug_name, plug_config)
if 0 == len(plug_set["events"]):
raise Exception("Pluggable set requires at least one event: %s"
% e_a_set)
self.event_action_sets.append(plug_set)
def parse_module_config(self, plug_set, plug_name, plug_config):
"""Parse module config and update the pluggable module set"""
if self.local_event_registry.check(plug_name):
plug_class = self.local_event_registry.get_class(plug_name)
plug_set["events"].append(
plug_class(self.test_object, self.event_triggered, plug_config))
elif self.local_action_registry.check(plug_name):
plug_class = self.local_action_registry.get_class(plug_name)
plug_set["actions"].append(
plug_class(self.test_object, plug_config))
elif PLUGGABLE_EVENT_REGISTRY.check(plug_name):
plug_class = PLUGGABLE_EVENT_REGISTRY.get_class(plug_name)
plug_set["events"].append(
plug_class(self.test_object, self.event_triggered, plug_config))
elif PLUGGABLE_ACTION_REGISTRY.check(plug_name):
plug_class = PLUGGABLE_ACTION_REGISTRY.get_class(plug_name)
plug_set["actions"].append(
plug_class(self.test_object, plug_config))
else:
raise Exception("Pluggable component '%s' not recognized"
% plug_name)
def find_triggered_set(self, triggered_by):
"""Find the set that was triggered."""
for e_a_set in self.event_action_sets:
for event_mod in e_a_set["events"]:
if event_mod == triggered_by:
return e_a_set
return None
def event_triggered(self, triggered_by, source=None, extra=None):
"""Run actions for the triggered set."""
triggered_set = self.find_triggered_set(triggered_by)
if not triggered_set:
raise Exception("Unable to find event/action set for %s"
% triggered_by)
for action_mod in triggered_set["actions"]:
action_mod.run(triggered_by, source, extra)
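# Editor's sketch, not part of the original module: the three accepted
# config-section shapes described in the class docstring all normalize to the
# same {"mapping": [...]} structure in __init__ above. The event/action names
# and settings below are placeholders.
_EXAMPLE_EVENT_ACTION_MAPPINGS = [
    # A bare list of sets is wrapped as {"mapping": <the list>}.
    [{'some-event': {}, 'some-action': {}}],
    # A single dict without a "mapping" key is wrapped as {"mapping": [<the dict>]}.
    {'some-event': {}, 'some-action': {}},
    # The fully explicit form, optionally alongside local "events"/"actions" maps.
    {'mapping': [{'some-event': {}, 'some-action': {}}]},
]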
class TestStartEventModule(object):
"""An event module that triggers when the test starts."""
def __init__(self, test_object, triggered_callback, config):
"""Setup the test start observer"""
self.test_object = test_object
self.triggered_callback = triggered_callback
self.config = config
test_object.register_start_observer(self.start_observer)
def start_observer(self, ast):
"""Notify the event-action mapper that the test has started."""
self.triggered_callback(self, ast)
PLUGGABLE_EVENT_REGISTRY.register("test-start", TestStartEventModule)
class LogActionModule(object):
"""An action module that logs a message when triggered."""
def __init__(self, test_object, config):
"""Setup the test start observer"""
self.test_object = test_object
self.message = config["message"]
def run(self, triggered_by, source, extra):
"""Log a message."""
LOGGER.info(self.message)
PLUGGABLE_ACTION_REGISTRY.register("logger", LogActionModule)
class ValidateLogActionModule(object):
"""An action module that validates a log files existence."""
def __init__(self, test_object, config):
self.test_object = test_object
self.logfile = config["logfile"]
self.pass_if_present = config["pass-if-present"]
def run(self, triggered_by, source, extra):
"""Check to see if log file is present or not."""
files = []
testpath = ('%s/var/log/asterisk' %
(self.test_object.ast[0].base))
for (dirpath, dirnames, filenames) in os.walk(testpath):
files.extend(filenames)
break
if self.logfile in files:
if (self.pass_if_present):
self.test_object.set_passed(True)
else:
self.test_object.set_passed(False)
else:
if (self.pass_if_present):
self.test_object.set_passed(False)
else:
self.test_object.set_passed(True)
PLUGGABLE_ACTION_REGISTRY.register("validate-log", ValidateLogActionModule)
class CallbackActionModule(object):
"""An action module that calls the specified callback."""
def __init__(self, test_object, config):
"""Setup the test start observer"""
self.test_object = test_object
self.module = config["module"]
self.method = config["method"]
def run(self, triggered_by, source, extra):
"""Call the callback."""
method = load_and_parse_module(self.module + '.' + self.method)
self.test_object.set_passed(method(self.test_object, triggered_by,
source, extra))
PLUGGABLE_ACTION_REGISTRY.register("callback", CallbackActionModule)
class StopTestActionModule(object):
"""Action module that stops a test"""
def __init__(self, test_object, config):
"""Constructor
Keyword Arguments:
test_object The main test object
config The pluggable module config
"""
self.test_object = test_object
def run(self, triggered_by, source, extra):
"""Execute the action, which stops the test
Keyword Arguments:
triggered_by The event that triggered this action
source The Asterisk interface object that provided the event
extra Source dependent data
"""
self.test_object.stop_reactor()
PLUGGABLE_ACTION_REGISTRY.register("stop_test", StopTestActionModule)
class PjsuaPhoneActionModule(object):
"""An action module that instructs a phone to perform an action."""
def __init__(self, test_object, config):
"""Setup the test start observer"""
self.test_object = test_object
self.module = "phones"
self.method = config["action"]
self.config = config
def run(self, triggered_by, source, extra):
"""Instruct phone to perform action"""
method = load_and_parse_module(self.module + "." + self.method)
method(self.test_object, triggered_by, source, extra, self.config)
PLUGGABLE_ACTION_REGISTRY.register("pjsua_phone", PjsuaPhoneActionModule)
| gpl-2.0 | -5,028,680,981,586,674,000 | -1,214,595,440,385,717,800 | 39.016478 | 103 | 0.587528 | false |
etos/django | tests/forms_tests/tests/test_validators.py | 111 | 2210 | import re
from unittest import TestCase
from django import forms
from django.core import validators
from django.core.exceptions import ValidationError
class TestFieldWithValidators(TestCase):
def test_all_errors_get_reported(self):
class UserForm(forms.Form):
full_name = forms.CharField(
max_length=50,
validators=[
validators.validate_integer,
validators.validate_email,
]
)
string = forms.CharField(
max_length=50,
validators=[
validators.RegexValidator(
regex='^[a-zA-Z]*$',
message="Letters only.",
)
]
)
ignore_case_string = forms.CharField(
max_length=50,
validators=[
validators.RegexValidator(
regex='^[a-z]*$',
message="Letters only.",
flags=re.IGNORECASE,
)
]
)
form = UserForm({
'full_name': 'not int nor mail',
'string': '2 is not correct',
'ignore_case_string': "IgnORE Case strIng",
})
with self.assertRaises(ValidationError) as e:
form.fields['full_name'].clean('not int nor mail')
self.assertEqual(2, len(e.exception.messages))
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['string'], ["Letters only."])
        self.assertEqual(form.errors['ignore_case_string'], ["Letters only."])
def test_field_validators_can_be_any_iterable(self):
class UserForm(forms.Form):
full_name = forms.CharField(
max_length=50,
validators=(
validators.validate_integer,
validators.validate_email,
)
)
form = UserForm({'full_name': 'not int nor mail'})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['full_name'], ['Enter a valid integer.', 'Enter a valid email address.'])
| bsd-3-clause | -180,757,905,842,426,140 | -5,461,417,125,566,681,000 | 33.53125 | 110 | 0.503167 | false |
hyperized/ansible | test/units/modules/network/netvisor/test_pn_vrouter_interface_ip.py | 23 | 2787 | # Copyright: (c) 2018, Pluribus Networks
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat.mock import patch
from ansible.modules.network.netvisor import pn_vrouter_interface_ip
from units.modules.utils import set_module_args
from .nvos_module import TestNvosModule
class TestVrouterInterfaceIpModule(TestNvosModule):
module = pn_vrouter_interface_ip
def setUp(self):
self.mock_run_nvos_commands = patch('ansible.modules.network.netvisor.pn_vrouter_interface_ip.run_cli')
self.run_nvos_commands = self.mock_run_nvos_commands.start()
self.mock_run_check_cli = patch('ansible.modules.network.netvisor.pn_vrouter_interface_ip.check_cli')
self.run_check_cli = self.mock_run_check_cli.start()
def tearDown(self):
self.mock_run_nvos_commands.stop()
self.mock_run_check_cli.stop()
def run_cli_patch(self, module, cli, state_map):
if state_map['present'] == 'vrouter-interface-ip-add':
results = dict(
changed=True,
cli_cmd=cli
)
elif state_map['absent'] == 'vrouter-interface-ip-remove':
results = dict(
changed=True,
cli_cmd=cli
)
module.exit_json(**results)
def load_fixtures(self, commands=None, state=None, transport='cli'):
self.run_nvos_commands.side_effect = self.run_cli_patch
if state == 'present':
self.run_check_cli.return_value = True, False, True
if state == 'absent':
self.run_check_cli.return_value = True, True, True
def test_vrouter_interface_ip_add(self):
set_module_args({'pn_cliswitch': 'sw01', 'pn_vrouter_name': 'foo-vrouter',
'pn_ip': '2620:0:1651:1::30', 'pn_netmask': '127', 'pn_nic': 'eth0.4092', 'state': 'present'})
result = self.execute_module(changed=True, state='present')
expected_cmd = ' switch sw01 vrouter-interface-ip-add vrouter-name foo-vrouter nic eth0.4092 '
expected_cmd += 'ip 2620:0:1651:1::30 netmask 127'
self.assertEqual(result['cli_cmd'], expected_cmd)
def test_vrouter_interface_ip_remove(self):
set_module_args({'pn_cliswitch': 'sw01', 'pn_vrouter_name': 'foo-vrouter',
'pn_ip': '2620:0:1651:1::30', 'pn_nic': 'eth0.4092', 'state': 'absent'})
result = self.execute_module(changed=True, state='absent')
expected_cmd = ' switch sw01 vrouter-interface-ip-remove vrouter-name foo-vrouter nic eth0.4092 '
expected_cmd += 'ip 2620:0:1651:1::30 '
self.assertEqual(result['cli_cmd'], expected_cmd)
| gpl-3.0 | -8,483,683,549,716,537,000 | 6,390,948,926,024,503,000 | 43.951613 | 119 | 0.63258 | false |
dya2/python-for-android | python-build/python-libs/gdata/src/gdata/oauth/__init__.py | 157 | 19407 | import cgi
import urllib
import time
import random
import urlparse
import hmac
import binascii
VERSION = '1.0' # Hi Blaine!
HTTP_METHOD = 'GET'
SIGNATURE_METHOD = 'PLAINTEXT'
# Generic exception class
class OAuthError(RuntimeError):
  def __init__(self, message='OAuth error occurred.'):
self.message = message
# optional WWW-Authenticate header (401 error)
def build_authenticate_header(realm=''):
return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}
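# Editor's illustrative note, not part of the original library: for example,
# build_authenticate_header('http://photos.example.net/') returns
# {'WWW-Authenticate': 'OAuth realm="http://photos.example.net/"'}.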
# url escape
def escape(s):
# escape '/' too
return urllib.quote(s, safe='~')
# util function: current timestamp
# seconds since epoch (UTC)
def generate_timestamp():
return int(time.time())
# util function: nonce
# pseudorandom number
def generate_nonce(length=8):
return ''.join([str(random.randint(0, 9)) for i in range(length)])
# OAuthConsumer is a data type that represents the identity of the Consumer
# via its shared secret with the Service Provider.
class OAuthConsumer(object):
key = None
secret = None
def __init__(self, key, secret):
self.key = key
self.secret = secret
# OAuthToken is a data type that represents an End User via either an access
# or request token.
class OAuthToken(object):
# access tokens and request tokens
key = None
secret = None
'''
key = the token
secret = the token secret
'''
def __init__(self, key, secret):
self.key = key
self.secret = secret
def to_string(self):
return urllib.urlencode({'oauth_token': self.key, 'oauth_token_secret': self.secret})
# return a token from something like:
# oauth_token_secret=digg&oauth_token=digg
def from_string(s):
params = cgi.parse_qs(s, keep_blank_values=False)
key = params['oauth_token'][0]
secret = params['oauth_token_secret'][0]
return OAuthToken(key, secret)
from_string = staticmethod(from_string)
def __str__(self):
return self.to_string()
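# Editor's sketch, not part of the original library: an OAuthToken serializes to
# a query string and can be rebuilt from one. The key/secret values here are
# made-up placeholders.
def _example_token_roundtrip():
  token = OAuthToken('request-token', 'request-secret')
  serialized = token.to_string()  # 'oauth_token_secret=...&oauth_token=...'
  return OAuthToken.from_string(serialized).key  # 'request-token'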
# OAuthRequest represents the request and can be serialized
class OAuthRequest(object):
'''
OAuth parameters:
- oauth_consumer_key
- oauth_token
- oauth_signature_method
- oauth_signature
- oauth_timestamp
- oauth_nonce
- oauth_version
... any additional parameters, as defined by the Service Provider.
'''
parameters = None # oauth parameters
http_method = HTTP_METHOD
http_url = None
version = VERSION
def __init__(self, http_method=HTTP_METHOD, http_url=None, parameters=None):
self.http_method = http_method
self.http_url = http_url
self.parameters = parameters or {}
def set_parameter(self, parameter, value):
self.parameters[parameter] = value
def get_parameter(self, parameter):
try:
return self.parameters[parameter]
except:
raise OAuthError('Parameter not found: %s' % parameter)
def _get_timestamp_nonce(self):
return self.get_parameter('oauth_timestamp'), self.get_parameter('oauth_nonce')
# get any non-oauth parameters
def get_nonoauth_parameters(self):
parameters = {}
for k, v in self.parameters.iteritems():
# ignore oauth parameters
if k.find('oauth_') < 0:
parameters[k] = v
return parameters
# serialize as a header for an HTTPAuth request
def to_header(self, realm=''):
auth_header = 'OAuth realm="%s"' % realm
# add the oauth parameters
if self.parameters:
for k, v in self.parameters.iteritems():
if k[:6] == 'oauth_':
auth_header += ', %s="%s"' % (k, escape(str(v)))
return {'Authorization': auth_header}
# serialize as post data for a POST request
def to_postdata(self):
return '&'.join(['%s=%s' % (escape(str(k)), escape(str(v))) for k, v in self.parameters.iteritems()])
# serialize as a url for a GET request
def to_url(self):
return '%s?%s' % (self.get_normalized_http_url(), self.to_postdata())
# return a string that consists of all the parameters that need to be signed
def get_normalized_parameters(self):
params = self.parameters
try:
# exclude the signature if it exists
del params['oauth_signature']
except:
pass
key_values = params.items()
    # sort lexicographically, first by key, then by value
key_values.sort()
# combine key value pairs in string and escape
return '&'.join(['%s=%s' % (escape(str(k)), escape(str(v))) for k, v in key_values])
# just uppercases the http method
def get_normalized_http_method(self):
return self.http_method.upper()
# parses the url and rebuilds it to be scheme://host/path
def get_normalized_http_url(self):
parts = urlparse.urlparse(self.http_url)
url_string = '%s://%s%s' % (parts[0], parts[1], parts[2]) # scheme, netloc, path
return url_string
# set the signature parameter to the result of build_signature
def sign_request(self, signature_method, consumer, token):
# set the signature method
self.set_parameter('oauth_signature_method', signature_method.get_name())
# set the signature
self.set_parameter('oauth_signature', self.build_signature(signature_method, consumer, token))
def build_signature(self, signature_method, consumer, token):
# call the build signature method within the signature method
return signature_method.build_signature(self, consumer, token)
def from_request(http_method, http_url, headers=None, parameters=None, query_string=None):
# combine multiple parameter sources
if parameters is None:
parameters = {}
# headers
if headers and 'Authorization' in headers:
auth_header = headers['Authorization']
# check that the authorization header is OAuth
if auth_header.index('OAuth') > -1:
try:
# get the parameters from the header
header_params = OAuthRequest._split_header(auth_header)
parameters.update(header_params)
except:
raise OAuthError('Unable to parse OAuth parameters from Authorization header.')
# GET or POST query string
if query_string:
query_params = OAuthRequest._split_url_string(query_string)
parameters.update(query_params)
# URL parameters
param_str = urlparse.urlparse(http_url)[4] # query
url_params = OAuthRequest._split_url_string(param_str)
parameters.update(url_params)
if parameters:
return OAuthRequest(http_method, http_url, parameters)
return None
from_request = staticmethod(from_request)
def from_consumer_and_token(oauth_consumer, token=None, http_method=HTTP_METHOD, http_url=None, parameters=None):
if not parameters:
parameters = {}
defaults = {
'oauth_consumer_key': oauth_consumer.key,
'oauth_timestamp': generate_timestamp(),
'oauth_nonce': generate_nonce(),
'oauth_version': OAuthRequest.version,
}
defaults.update(parameters)
parameters = defaults
if token:
parameters['oauth_token'] = token.key
return OAuthRequest(http_method, http_url, parameters)
from_consumer_and_token = staticmethod(from_consumer_and_token)
def from_token_and_callback(token, callback=None, http_method=HTTP_METHOD, http_url=None, parameters=None):
if not parameters:
parameters = {}
parameters['oauth_token'] = token.key
if callback:
parameters['oauth_callback'] = callback
return OAuthRequest(http_method, http_url, parameters)
from_token_and_callback = staticmethod(from_token_and_callback)
# util function: turn Authorization: header into parameters, has to do some unescaping
def _split_header(header):
params = {}
parts = header.split(',')
for param in parts:
# ignore realm parameter
if param.find('OAuth realm') > -1:
continue
# remove whitespace
param = param.strip()
# split key-value
param_parts = param.split('=', 1)
# remove quotes and unescape the value
params[param_parts[0]] = urllib.unquote(param_parts[1].strip('\"'))
return params
_split_header = staticmethod(_split_header)
# util function: turn url string into parameters, has to do some unescaping
def _split_url_string(param_str):
parameters = cgi.parse_qs(param_str, keep_blank_values=False)
for k, v in parameters.iteritems():
parameters[k] = urllib.unquote(v[0])
return parameters
_split_url_string = staticmethod(_split_url_string)
# OAuthServer is a worker to check a requests validity against a data store
class OAuthServer(object):
timestamp_threshold = 300 # in seconds, five minutes
version = VERSION
signature_methods = None
data_store = None
def __init__(self, data_store=None, signature_methods=None):
self.data_store = data_store
self.signature_methods = signature_methods or {}
def set_data_store(self, oauth_data_store):
    self.data_store = oauth_data_store
def get_data_store(self):
return self.data_store
def add_signature_method(self, signature_method):
self.signature_methods[signature_method.get_name()] = signature_method
return self.signature_methods
# process a request_token request
# returns the request token on success
def fetch_request_token(self, oauth_request):
try:
# get the request token for authorization
token = self._get_token(oauth_request, 'request')
except OAuthError:
# no token required for the initial token request
version = self._get_version(oauth_request)
consumer = self._get_consumer(oauth_request)
self._check_signature(oauth_request, consumer, None)
# fetch a new token
token = self.data_store.fetch_request_token(consumer)
return token
# process an access_token request
# returns the access token on success
def fetch_access_token(self, oauth_request):
version = self._get_version(oauth_request)
consumer = self._get_consumer(oauth_request)
# get the request token
token = self._get_token(oauth_request, 'request')
self._check_signature(oauth_request, consumer, token)
new_token = self.data_store.fetch_access_token(consumer, token)
return new_token
# verify an api call, checks all the parameters
def verify_request(self, oauth_request):
# -> consumer and token
version = self._get_version(oauth_request)
consumer = self._get_consumer(oauth_request)
# get the access token
token = self._get_token(oauth_request, 'access')
self._check_signature(oauth_request, consumer, token)
parameters = oauth_request.get_nonoauth_parameters()
return consumer, token, parameters
# authorize a request token
def authorize_token(self, token, user):
return self.data_store.authorize_request_token(token, user)
# get the callback url
def get_callback(self, oauth_request):
return oauth_request.get_parameter('oauth_callback')
# optional support for the authenticate header
def build_authenticate_header(self, realm=''):
return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}
# verify the correct version request for this server
def _get_version(self, oauth_request):
try:
version = oauth_request.get_parameter('oauth_version')
except:
version = VERSION
if version and version != self.version:
raise OAuthError('OAuth version %s not supported.' % str(version))
return version
# figure out the signature with some defaults
def _get_signature_method(self, oauth_request):
try:
signature_method = oauth_request.get_parameter('oauth_signature_method')
except:
signature_method = SIGNATURE_METHOD
try:
# get the signature method object
signature_method = self.signature_methods[signature_method]
except:
signature_method_names = ', '.join(self.signature_methods.keys())
      raise OAuthError('Signature method %s not supported, try one of the following: %s' % (signature_method, signature_method_names))
return signature_method
def _get_consumer(self, oauth_request):
consumer_key = oauth_request.get_parameter('oauth_consumer_key')
if not consumer_key:
raise OAuthError('Invalid consumer key.')
consumer = self.data_store.lookup_consumer(consumer_key)
if not consumer:
raise OAuthError('Invalid consumer.')
return consumer
# try to find the token for the provided request token key
def _get_token(self, oauth_request, token_type='access'):
token_field = oauth_request.get_parameter('oauth_token')
token = self.data_store.lookup_token(token_type, token_field)
if not token:
raise OAuthError('Invalid %s token: %s' % (token_type, token_field))
return token
def _check_signature(self, oauth_request, consumer, token):
timestamp, nonce = oauth_request._get_timestamp_nonce()
self._check_timestamp(timestamp)
self._check_nonce(consumer, token, nonce)
signature_method = self._get_signature_method(oauth_request)
try:
signature = oauth_request.get_parameter('oauth_signature')
except:
raise OAuthError('Missing signature.')
# validate the signature
valid_sig = signature_method.check_signature(oauth_request, consumer, token, signature)
if not valid_sig:
key, base = signature_method.build_signature_base_string(oauth_request, consumer, token)
raise OAuthError('Invalid signature. Expected signature base string: %s' % base)
built = signature_method.build_signature(oauth_request, consumer, token)
def _check_timestamp(self, timestamp):
# verify that timestamp is recentish
timestamp = int(timestamp)
now = int(time.time())
lapsed = now - timestamp
if lapsed > self.timestamp_threshold:
      raise OAuthError('Expired timestamp: given %d and now %d differ by more than the threshold %d' % (timestamp, now, self.timestamp_threshold))
def _check_nonce(self, consumer, token, nonce):
# verify that the nonce is uniqueish
nonce = self.data_store.lookup_nonce(consumer, token, nonce)
if nonce:
raise OAuthError('Nonce already used: %s' % str(nonce))
# OAuthClient is a worker to attempt to execute a request
class OAuthClient(object):
consumer = None
token = None
def __init__(self, oauth_consumer, oauth_token):
self.consumer = oauth_consumer
self.token = oauth_token
def get_consumer(self):
return self.consumer
def get_token(self):
return self.token
def fetch_request_token(self, oauth_request):
# -> OAuthToken
raise NotImplementedError
def fetch_access_token(self, oauth_request):
# -> OAuthToken
raise NotImplementedError
def access_resource(self, oauth_request):
# -> some protected resource
raise NotImplementedError
# OAuthDataStore is a database abstraction used to lookup consumers and tokens
class OAuthDataStore(object):
def lookup_consumer(self, key):
# -> OAuthConsumer
raise NotImplementedError
def lookup_token(self, oauth_consumer, token_type, token_token):
# -> OAuthToken
raise NotImplementedError
def lookup_nonce(self, oauth_consumer, oauth_token, nonce, timestamp):
# -> OAuthToken
raise NotImplementedError
def fetch_request_token(self, oauth_consumer):
# -> OAuthToken
raise NotImplementedError
def fetch_access_token(self, oauth_consumer, oauth_token):
# -> OAuthToken
raise NotImplementedError
def authorize_request_token(self, oauth_token, user):
# -> OAuthToken
raise NotImplementedError
# OAuthSignatureMethod is a strategy class that implements a signature method
class OAuthSignatureMethod(object):
def get_name(self):
# -> str
raise NotImplementedError
def build_signature_base_string(self, oauth_request, oauth_consumer, oauth_token):
# -> str key, str raw
raise NotImplementedError
def build_signature(self, oauth_request, oauth_consumer, oauth_token):
# -> str
raise NotImplementedError
def check_signature(self, oauth_request, consumer, token, signature):
built = self.build_signature(oauth_request, consumer, token)
return built == signature
class OAuthSignatureMethod_HMAC_SHA1(OAuthSignatureMethod):
def get_name(self):
return 'HMAC-SHA1'
def build_signature_base_string(self, oauth_request, consumer, token):
sig = (
escape(oauth_request.get_normalized_http_method()),
escape(oauth_request.get_normalized_http_url()),
escape(oauth_request.get_normalized_parameters()),
)
key = '%s&' % escape(consumer.secret)
if token:
key += escape(token.secret)
raw = '&'.join(sig)
return key, raw
def build_signature(self, oauth_request, consumer, token):
# build the base signature string
key, raw = self.build_signature_base_string(oauth_request, consumer, token)
# hmac object
try:
import hashlib # 2.5
hashed = hmac.new(key, raw, hashlib.sha1)
except:
import sha # deprecated
hashed = hmac.new(key, raw, sha)
# calculate the digest base 64
return binascii.b2a_base64(hashed.digest())[:-1]
class OAuthSignatureMethod_PLAINTEXT(OAuthSignatureMethod):
def get_name(self):
return 'PLAINTEXT'
def build_signature_base_string(self, oauth_request, consumer, token):
# concatenate the consumer key and secret
sig = escape(consumer.secret) + '&'
if token:
sig = sig + escape(token.secret)
return sig
def build_signature(self, oauth_request, consumer, token):
return self.build_signature_base_string(oauth_request, consumer, token)
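# Editor's sketch, not part of the original library: putting the pieces above
# together to build and HMAC-SHA1 sign a consumer request. The URL, keys and
# secrets are made-up placeholders.
def _example_signed_request_header():
  consumer = OAuthConsumer('example-consumer-key', 'example-consumer-secret')
  token = OAuthToken('example-token', 'example-token-secret')
  request = OAuthRequest.from_consumer_and_token(
      consumer, token=token, http_method='GET',
      http_url='http://example.com/photos')
  request.sign_request(OAuthSignatureMethod_HMAC_SHA1(), consumer, token)
  return request.to_header(realm='http://example.com/')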
| apache-2.0 | 3,109,599,094,409,035,000 | 5,321,359,189,361,594,000 | 35.03626 | 158 | 0.619519 | false |
tempbottle/rethinkdb | drivers/python/rethinkdb/_import.py | 9 | 39332 | #!/usr/bin/env python
from __future__ import print_function
import signal
import sys, os, datetime, time, json, traceback, csv
import multiprocessing, multiprocessing.queues, subprocess, re, ctypes, codecs
from optparse import OptionParser
from ._backup import *
import rethinkdb as r
# Used because of API differences in the csv module, taken from
# http://python3porting.com/problems.html
PY3 = sys.version > '3'
#json parameters
json_read_chunk_size = 32 * 1024
json_max_buffer_size = 128 * 1024 * 1024
try:
import cPickle as pickle
except ImportError:
import pickle
try:
from itertools import imap
except ImportError:
imap = map
try:
xrange
except NameError:
xrange = range
try:
from multiprocessing import SimpleQueue
except ImportError:
from multiprocessing.queues import SimpleQueue
info = "'rethinkdb import` loads data into a RethinkDB cluster"
usage = "\
rethinkdb import -d DIR [-c HOST:PORT] [-a AUTH_KEY] [--force]\n\
[-i (DB | DB.TABLE)] [--clients NUM]\n\
rethinkdb import -f FILE --table DB.TABLE [-c HOST:PORT] [-a AUTH_KEY]\n\
[--force] [--clients NUM] [--format (csv | json)] [--pkey PRIMARY_KEY]\n\
[--delimiter CHARACTER] [--custom-header FIELD,FIELD... [--no-header]]"
def print_import_help():
print(info)
print(usage)
print("")
print(" -h [ --help ] print this help")
print(" -c [ --connect ] HOST:PORT host and client port of a rethinkdb node to connect")
print(" to (defaults to localhost:28015)")
print(" -a [ --auth ] AUTH_KEY authorization key for rethinkdb clients")
print(" --clients NUM_CLIENTS the number of client connections to use (defaults")
print(" to 8)")
print(" --hard-durability use hard durability writes (slower, but less memory")
print(" consumption on the server)")
print(" --force import data even if a table already exists, and")
print(" overwrite duplicate primary keys")
print(" --fields limit which fields to use when importing one table")
print("")
print("Import directory:")
print(" -d [ --directory ] DIR the directory to import data from")
print(" -i [ --import ] (DB | DB.TABLE) limit restore to the given database or table (may")
print(" be specified multiple times)")
print(" --no-secondary-indexes do not create secondary indexes for the imported tables")
print("")
print("Import file:")
print(" -f [ --file ] FILE the file to import data from")
print(" --table DB.TABLE the table to import the data into")
print(" --format (csv | json) the format of the file (defaults to json)")
print(" --pkey PRIMARY_KEY the field to use as the primary key in the table")
print("")
print("Import CSV format:")
print(" --delimiter CHARACTER character separating fields, or '\\t' for tab")
print(" --no-header do not read in a header of field names")
print(" --custom-header FIELD,FIELD... header to use (overriding file header), must be")
print(" specified if --no-header")
print("")
print("Import JSON format:")
print(" --max-document-size the maximum size in bytes that a single JSON document")
print(" can have (defaults to 134217728).")
print("")
print("EXAMPLES:")
print("")
print("rethinkdb import -d rdb_export -c mnemosyne:39500 --clients 128")
print(" Import data into a cluster running on host 'mnemosyne' with a client port at 39500,")
print(" using 128 client connections and the named export directory.")
print("")
print("rethinkdb import -f site_history.csv --format csv --table test.history --pkey count")
print(" Import data into a local cluster and the table 'history' in the 'test' database,")
print(" using the named CSV file, and using the 'count' field as the primary key.")
print("")
print("rethinkdb import -d rdb_export -c hades -a hunter2 -i test")
print(" Import data into a cluster running on host 'hades' which requires authorization,")
print(" using only the database 'test' from the named export directory.")
print("")
print("rethinkdb import -f subscriber_info.json --fields id,name,hashtag --force")
print(" Import data into a local cluster using the named JSON file, and only the fields")
print(" 'id', 'name', and 'hashtag', overwriting any existing rows with the same primary key.")
print("")
print("rethinkdb import -f user_data.csv --delimiter ';' --no-header --custom-header id,name,number")
print(" Import data into a local cluster using the named CSV file with no header and instead")
print(" use the fields 'id', 'name', and 'number', the delimiter is a semicolon (rather than")
print(" a comma).")
def parse_options():
parser = OptionParser(add_help_option=False, usage=usage)
parser.add_option("-c", "--connect", dest="host", metavar="HOST:PORT", default="localhost:28015", type="string")
parser.add_option("-a", "--auth", dest="auth_key", metavar="AUTHKEY", default="", type="string")
parser.add_option("--fields", dest="fields", metavar="FIELD,FIELD...", default=None, type="string")
parser.add_option("--clients", dest="clients", metavar="NUM_CLIENTS", default=8, type="int")
parser.add_option("--hard-durability", dest="hard", action="store_true", default=False)
parser.add_option("--force", dest="force", action="store_true", default=False)
parser.add_option("--debug", dest="debug", action="store_true", default=False)
parser.add_option("--max-document-size", dest="max_document_size", default=0,type="int")
# Directory import options
parser.add_option("-d", "--directory", dest="directory", metavar="DIRECTORY", default=None, type="string")
parser.add_option("-i", "--import", dest="tables", metavar="DB | DB.TABLE", default=[], action="append", type="string")
parser.add_option("--no-secondary-indexes", dest="create_sindexes", action="store_false", default=True)
# File import options
parser.add_option("-f", "--file", dest="import_file", metavar="FILE", default=None, type="string")
parser.add_option("--format", dest="import_format", metavar="json | csv", default=None, type="string")
parser.add_option("--table", dest="import_table", metavar="DB.TABLE", default=None, type="string")
parser.add_option("--pkey", dest="primary_key", metavar="KEY", default=None, type="string")
parser.add_option("--delimiter", dest="delimiter", metavar="CHARACTER", default=None, type="string")
parser.add_option("--no-header", dest="no_header", action="store_true", default=False)
parser.add_option("--custom-header", dest="custom_header", metavar="FIELD,FIELD...", default=None, type="string")
parser.add_option("-h", "--help", dest="help", default=False, action="store_true")
(options, args) = parser.parse_args()
# Check validity of arguments
if len(args) != 0:
raise RuntimeError("Error: No positional arguments supported. Unrecognized option '%s'" % args[0])
if options.help:
print_import_help()
exit(0)
res = {}
# Verify valid host:port --connect option
(res["host"], res["port"]) = parse_connect_option(options.host)
if options.clients < 1:
raise RuntimeError("Error: --client option too low, must have at least one client connection")
res["auth_key"] = options.auth_key
res["clients"] = options.clients
res["durability"] = "hard" if options.hard else "soft"
res["force"] = options.force
res["debug"] = options.debug
res["create_sindexes"] = options.create_sindexes
# Default behavior for csv files - may be changed by options
res["delimiter"] = ","
res["no_header"] = False
res["custom_header"] = None
# buffer size
if options.max_document_size > 0:
global json_max_buffer_size
json_max_buffer_size=options.max_document_size
if options.directory is not None:
# Directory mode, verify directory import options
if options.import_file is not None:
raise RuntimeError("Error: --file option is not valid when importing a directory")
if options.import_format is not None:
raise RuntimeError("Error: --format option is not valid when importing a directory")
if options.import_table is not None:
raise RuntimeError("Error: --table option is not valid when importing a directory")
if options.primary_key is not None:
raise RuntimeError("Error: --pkey option is not valid when importing a directory")
if options.delimiter is not None:
raise RuntimeError("Error: --delimiter option is not valid when importing a directory")
if options.no_header is not False:
raise RuntimeError("Error: --no-header option is not valid when importing a directory")
if options.custom_header is not None:
raise RuntimeError("Error: --custom-header option is not valid when importing a directory")
# Verify valid directory option
dirname = options.directory
res["directory"] = os.path.abspath(dirname)
if not os.path.exists(res["directory"]):
raise RuntimeError("Error: Directory to import does not exist: %d" % res["directory"])
# Verify valid --import options
res["db_tables"] = parse_db_table_options(options.tables)
# Parse fields
if options.fields is None:
res["fields"] = None
elif len(res["db_tables"]) != 1 or res["db_tables"][0][1] is None:
raise RuntimeError("Error: Can only use the --fields option when importing a single table")
else:
res["fields"] = options.fields.split(",")
elif options.import_file is not None:
# Single file mode, verify file import options
if len(options.tables) != 0:
raise RuntimeError("Error: --import option is not valid when importing a single file")
if options.directory is not None:
raise RuntimeError("Error: --directory option is not valid when importing a single file")
import_file = options.import_file
res["import_file"] = os.path.abspath(import_file)
if not os.path.exists(res["import_file"]):
raise RuntimeError("Error: File to import does not exist: %s" % res["import_file"])
# Verify valid --format option
if options.import_format is None:
options.import_format = os.path.split(options.import_file)[1].split(".")[-1]
if options.import_format not in ["csv", "json"]:
options.import_format = "json"
res["import_format"] = options.import_format
elif options.import_format not in ["csv", "json"]:
raise RuntimeError("Error: Unknown format '%s', valid options are 'csv' and 'json'" % options.import_format)
else:
res["import_format"] = options.import_format
# Verify valid --table option
if options.import_table is None:
raise RuntimeError("Error: Must specify a destination table to import into using the --table option")
res["import_db_table"] = parse_db_table(options.import_table)
if res["import_db_table"][1] is None:
raise RuntimeError("Error: Invalid 'db.table' format: %s" % options.import_table)
# Parse fields
if options.fields is None:
res["fields"] = None
else:
res["fields"] = options.fields.split(",")
if options.import_format == "csv":
if options.delimiter is None:
res["delimiter"] = ","
else:
if len(options.delimiter) == 1:
res["delimiter"] = options.delimiter
elif options.delimiter == "\\t":
res["delimiter"] = "\t"
else:
raise RuntimeError("Error: Must specify only one character for the --delimiter option")
if options.custom_header is None:
res["custom_header"] = None
else:
res["custom_header"] = options.custom_header.split(",")
if options.no_header == True and options.custom_header is None:
raise RuntimeError("Error: Cannot import a CSV file with --no-header and no --custom-header option")
res["no_header"] = options.no_header
else:
if options.delimiter is not None:
raise RuntimeError("Error: --delimiter option is only valid for CSV file formats")
if options.no_header == True:
raise RuntimeError("Error: --no-header option is only valid for CSV file formats")
if options.custom_header is not None:
raise RuntimeError("Error: --custom-header option is only valid for CSV file formats")
res["primary_key"] = options.primary_key
else:
raise RuntimeError("Error: Must specify one of --directory or --file to import")
return res
# This is called through rdb_call_wrapper so the import can be reattempted as long
# as progress is being made, even if connection errors occur. We save a failed task
# in the progress object so it can be resumed later on a new connection.
def import_from_queue(progress, conn, task_queue, error_queue, replace_conflicts, durability, write_count):
if progress[0] is not None and not replace_conflicts:
# We were interrupted and it's not ok to overwrite rows, check that the batch either:
# a) does not exist on the server
# b) is exactly the same on the server
task = progress[0]
pkey = r.db(task[0]).table(task[1]).info().run(conn)["primary_key"]
for i in reversed(range(len(task[2]))):
obj = pickle.loads(task[2][i])
if pkey not in obj:
raise RuntimeError("Connection error while importing. Current row has no specified primary key, so cannot guarantee absence of duplicates")
row = r.db(task[0]).table(task[1]).get(obj[pkey]).run(conn)
if row == obj:
write_count[0] += 1
del task[2][i]
else:
raise RuntimeError("Duplicate primary key `%s`:\n%s\n%s" % (pkey, str(obj), str(row)))
task = task_queue.get() if progress[0] is None else progress[0]
while not isinstance(task, StopIteration):
try:
# Unpickle objects (TODO: super inefficient, would be nice if we could pass down json)
objs = [pickle.loads(obj) for obj in task[2]]
conflict_action = 'replace' if replace_conflicts else 'error'
res = r.db(task[0]).table(task[1]).insert(objs, durability=durability, conflict=conflict_action).run(conn)
except:
progress[0] = task
raise
if res["errors"] > 0:
raise RuntimeError("Error when importing into table '%s.%s': %s" %
(task[0], task[1], res["first_error"]))
write_count[0] += len(objs)
task = task_queue.get()
# This is run for each client requested, and accepts tasks from the reader processes
def client_process(host, port, auth_key, task_queue, error_queue, rows_written, replace_conflicts, durability):
try:
conn_fn = lambda: r.connect(host, port, auth_key=auth_key)
write_count = [0]
rdb_call_wrapper(conn_fn, "import", import_from_queue, task_queue, error_queue, replace_conflicts, durability, write_count)
except:
ex_type, ex_class, tb = sys.exc_info()
error_queue.put((ex_type, ex_class, traceback.extract_tb(tb)))
# Read until the exit event so the readers do not hang on pushing onto the queue
while not isinstance(task_queue.get(), StopIteration):
pass
with rows_written.get_lock():
rows_written.value += write_count[0]
batch_length_limit = 200
batch_size_limit = 500000
class InterruptedError(Exception):
def __str__(self):
return "Interrupted"
# This function is called for each object read from a file by the reader processes
# and will push tasks to the client processes on the task queue
def object_callback(obj, db, table, task_queue, object_buffers, buffer_sizes, fields, exit_event):
global batch_size_limit
global batch_length_limit
if exit_event.is_set():
raise InterruptedError()
if not isinstance(obj, dict):
raise RuntimeError("Error: Invalid input, expected an object, but got %s" % type(obj))
# filter out fields
if fields is not None:
for key in list(obj.keys()):
if key not in fields:
del obj[key]
# Pickle the object here because we want an accurate size, and it'll pickle anyway for IPC
object_buffers.append(pickle.dumps(obj))
buffer_sizes.append(len(object_buffers[-1]))
if len(object_buffers) >= batch_length_limit or sum(buffer_sizes) > batch_size_limit:
task_queue.put((db, table, object_buffers))
del object_buffers[0:len(object_buffers)]
del buffer_sizes[0:len(buffer_sizes)]
return obj
def read_json_array(json_data, file_in, callback, progress_info,
json_array=True):
decoder = json.JSONDecoder()
file_offset = 0
offset = 0
while True:
try:
offset = json.decoder.WHITESPACE.match(json_data, offset).end()
if json_array and json_data[offset] == "]":
break # End of JSON
(obj, offset) = decoder.raw_decode(json_data, idx=offset)
callback(obj)
# Read past whitespace to the next record
file_offset += offset
json_data = json_data[offset:]
offset = json.decoder.WHITESPACE.match(json_data, 0).end()
if json_array and json_data[offset] == ",":
# Read past the comma
offset = json.decoder.WHITESPACE.match(json_data, offset + 1).end()
elif json_array and json_data[offset] != "]":
raise ValueError("Error: JSON format not recognized - expected ',' or ']' after object")
except (ValueError, IndexError):
before_len = len(json_data)
to_read = max(json_read_chunk_size, before_len)
json_data += file_in.read(min(to_read, json_max_buffer_size - before_len))
if json_array and json_data[offset] == ",":
offset = json.decoder.WHITESPACE.match(json_data, offset + 1).end()
elif (not json_array) and before_len == len(json_data):
break # End of JSON
elif before_len == len(json_data) :
raise
elif len(json_data) >= json_max_buffer_size:
raise ValueError("Error: JSON max buffer size exceeded. Use '--max-document-size' to extend your buffer.")
progress_info[0].value = file_offset
# Read the rest of the file and return it so it can be checked for unexpected data
json_data += file_in.read()
return json_data[offset + 1:]
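# Editor's sketch, not part of the original module: exercising read_json_array on
# an in-memory JSON array. The progress pair mirrors the multiprocessing Values
# the real readers pass in; the data and names here are made-up placeholders.
def _example_read_json_array():
    import io
    rows = []
    progress = (multiprocessing.Value(ctypes.c_longlong, -1),
                multiprocessing.Value(ctypes.c_longlong, 0))
    data = '[{"id": 1}, {"id": 2}]'
    # The caller strips the leading '[' before handing the data over, just as
    # json_reader below does.
    read_json_array(data[1:], io.StringIO(u''), rows.append, progress)
    return rows  # [{'id': 1}, {'id': 2}]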
def json_reader(task_queue, filename, db, table, fields, progress_info, exit_event):
object_buffers = []
buffer_sizes = []
with open(filename, "r") as file_in:
        # Scan to the opening '[' or '{', then load objects one-by-one
# Read in the data in chunks, since the json module would just read the whole thing at once
json_data = file_in.read(json_read_chunk_size)
callback = lambda x: object_callback(x, db, table, task_queue, object_buffers,
buffer_sizes, fields, exit_event)
progress_info[1].value = os.path.getsize(filename)
offset = json.decoder.WHITESPACE.match(json_data, 0).end()
if json_data[offset] in "[{":
json_data = read_json_array(
json_data[offset + (1 if json_data[offset] == "[" else 0):],
file_in, callback, progress_info,
json_data[offset] == "[")
else:
raise RuntimeError("Error: JSON format not recognized - file does not begin with an object or array")
# Make sure only remaining data is whitespace
while len(json_data) > 0:
if json.decoder.WHITESPACE.match(json_data, 0).end() != len(json_data):
raise RuntimeError("Error: JSON format not recognized - extra characters found after end of data")
json_data = file_in.read(json_read_chunk_size)
progress_info[0].value = progress_info[1].value
if len(object_buffers) > 0:
task_queue.put((db, table, object_buffers))
# Wrapper classes for the handling of unicode csv files
# Taken from https://docs.python.org/2/library/csv.html
class Utf8Recoder:
def __init__(self, f):
self.reader = codecs.getreader('utf-8')(f)
def __iter__(self):
return self
def next(self):
return self.reader.next().encode("utf-8")
class Utf8CsvReader:
def __init__(self, f, **kwargs):
f = Utf8Recoder(f)
self.reader = csv.reader(f, **kwargs)
self.line_num = self.reader.line_num
def next(self):
row = self.reader.next()
self.line_num = self.reader.line_num
return [unicode(s, 'utf-8') for s in row]
def __iter__(self):
return self
def open_csv_file(filename):
if PY3:
return open(filename, 'r', encoding='utf-8', newline='')
else:
return open(filename, 'r')
def csv_reader(task_queue, filename, db, table, options, progress_info, exit_event):
object_buffers = []
buffer_sizes = []
# Count the lines so we can report progress
# TODO: this requires us to make two passes on csv files
line_count = 0
with open_csv_file(filename) as file_in:
for i, l in enumerate(file_in):
pass
line_count = i + 1
progress_info[1].value = line_count
with open_csv_file(filename) as file_in:
if PY3:
reader = csv.reader(file_in, delimiter=options["delimiter"])
else:
reader = Utf8CsvReader(file_in, delimiter=options["delimiter"])
if not options["no_header"]:
fields_in = next(reader)
# Field names may override fields from the header
if options["custom_header"] is not None:
if not options["no_header"]:
print("Ignoring header row: %s" % str(fields_in))
fields_in = options["custom_header"]
elif options["no_header"]:
raise RuntimeError("Error: No field name information available")
for row in reader:
file_line = reader.line_num
progress_info[0].value = file_line
if len(fields_in) != len(row):
raise RuntimeError("Error: File '%s' line %d has an inconsistent number of columns" % (filename, file_line))
# We import all csv fields as strings (since we can't assume the type of the data)
obj = dict(zip(fields_in, row))
for key in list(obj.keys()): # Treat empty fields as no entry rather than empty string
if len(obj[key]) == 0:
del obj[key]
object_callback(obj, db, table, task_queue, object_buffers, buffer_sizes, options["fields"], exit_event)
if len(object_buffers) > 0:
task_queue.put((db, table, object_buffers))
# This function is called through rdb_call_wrapper, which will reattempt if a connection
# error occurs. Progress will resume where it left off.
def create_table(progress, conn, db, table, pkey, sindexes):
if table not in r.db(db).table_list().run(conn):
r.db(db).table_create(table, primary_key=pkey).run(conn)
if progress[0] is None:
progress[0] = 0
# Recreate secondary indexes - assume that any indexes that already exist are wrong
# and create them from scratch
indexes = r.db(db).table(table).index_list().run(conn)
created_indexes = list()
for sindex in sindexes[progress[0]:]:
if isinstance(sindex, dict) and all(k in sindex for k in ('index', 'function')):
if sindex['index'] in indexes:
r.db(db).table(table).index_drop(sindex['index']).run(conn)
r.db(db).table(table).index_create(sindex['index'], sindex['function']).run(conn)
created_indexes.append(sindex['index'])
progress[0] += 1
r.db(db).table(table).index_wait(r.args(created_indexes)).run(conn)
def table_reader(options, file_info, task_queue, error_queue, progress_info, exit_event):
try:
db = file_info["db"]
table = file_info["table"]
primary_key = file_info["info"]["primary_key"]
conn_fn = lambda: r.connect(options["host"], options["port"], auth_key=options["auth_key"])
rdb_call_wrapper(conn_fn, "create table", create_table, db, table, primary_key,
file_info["info"]["indexes"] if options["create_sindexes"] else [])
if file_info["format"] == "json":
json_reader(task_queue,
file_info["file"],
db, table,
options["fields"],
progress_info,
exit_event)
elif file_info["format"] == "csv":
csv_reader(task_queue,
file_info["file"],
db, table,
options,
progress_info,
exit_event)
else:
raise RuntimeError("Error: Unknown file format specified")
except InterruptedError:
pass # Don't save interrupted errors, they are side-effects
except:
ex_type, ex_class, tb = sys.exc_info()
error_queue.put((ex_type, ex_class, traceback.extract_tb(tb), file_info["file"]))
def abort_import(signum, frame, parent_pid, exit_event, task_queue, clients, interrupt_event):
# Only do the abort from the parent process
if os.getpid() == parent_pid:
interrupt_event.set()
exit_event.set()
def print_progress(ratio):
total_width = 40
done_width = int(ratio * total_width)
undone_width = total_width - done_width
print("\r[%s%s] %3d%%" % ("=" * done_width, " " * undone_width, int(100 * ratio)), end=' ')
sys.stdout.flush()
def update_progress(progress_info):
lowest_completion = 1.0
for current, max_count in progress_info:
curr_val = current.value
max_val = max_count.value
if curr_val < 0:
lowest_completion = 0.0
elif max_val <= 0:
lowest_completion = 1.0
else:
lowest_completion = min(lowest_completion, float(curr_val) / max_val)
print_progress(lowest_completion)
def spawn_import_clients(options, files_info):
# Spawn one reader process for each db.table, as well as many client processes
task_queue = SimpleQueue()
error_queue = SimpleQueue()
exit_event = multiprocessing.Event()
interrupt_event = multiprocessing.Event()
errors = []
reader_procs = []
client_procs = []
parent_pid = os.getpid()
signal.signal(signal.SIGINT, lambda a, b: abort_import(a, b, parent_pid, exit_event, task_queue, client_procs, interrupt_event))
try:
progress_info = []
rows_written = multiprocessing.Value(ctypes.c_longlong, 0)
for i in xrange(options["clients"]):
client_procs.append(multiprocessing.Process(target=client_process,
args=(options["host"],
options["port"],
options["auth_key"],
task_queue,
error_queue,
rows_written,
options["force"],
options["durability"])))
client_procs[-1].start()
for file_info in files_info:
progress_info.append((multiprocessing.Value(ctypes.c_longlong, -1), # Current lines/bytes processed
multiprocessing.Value(ctypes.c_longlong, 0))) # Total lines/bytes to process
reader_procs.append(multiprocessing.Process(target=table_reader,
args=(options,
file_info,
task_queue,
error_queue,
progress_info[-1],
exit_event)))
reader_procs[-1].start()
# Wait for all reader processes to finish - hooray, polling
while len(reader_procs) > 0:
time.sleep(0.1)
# If an error has occurred, exit out early
while not error_queue.empty():
exit_event.set()
errors.append(error_queue.get())
reader_procs = [proc for proc in reader_procs if proc.is_alive()]
update_progress(progress_info)
# Wait for all clients to finish
alive_clients = sum([client.is_alive() for client in client_procs])
for i in xrange(alive_clients):
task_queue.put(StopIteration())
while len(client_procs) > 0:
time.sleep(0.1)
client_procs = [client for client in client_procs if client.is_alive()]
# If we were successful, make sure 100% progress is reported
if len(errors) == 0 and not interrupt_event.is_set():
print_progress(1.0)
def plural(num, text):
return "%d %s%s" % (num, text, "" if num == 1 else "s")
# Continue past the progress output line
print("")
print("%s imported in %s" % (plural(rows_written.value, "row"),
plural(len(files_info), "table")))
finally:
signal.signal(signal.SIGINT, signal.SIG_DFL)
if interrupt_event.is_set():
raise RuntimeError("Interrupted")
if len(errors) != 0:
            # multiprocessing queues don't handle tracebacks, so they've already been stringified in the queue
for error in errors:
print("%s" % error[1], file=sys.stderr)
if options["debug"]:
print("%s traceback: %s" % (error[0].__name__, error[2]), file=sys.stderr)
if len(error) == 4:
print("In file: %s" % error[3], file=sys.stderr)
raise RuntimeError("Errors occurred during import")
def get_import_info_for_file(filename, db_table_filter):
file_info = {}
file_info["file"] = filename
file_info["format"] = os.path.split(filename)[1].split(".")[-1]
file_info["db"] = os.path.split(os.path.split(filename)[0])[1]
file_info["table"] = os.path.split(filename)[1].split(".")[0]
if len(db_table_filter) > 0:
if (file_info["db"], None) not in db_table_filter:
if (file_info["db"], file_info["table"]) not in db_table_filter:
return None
info_filepath = os.path.join(os.path.split(filename)[0], file_info["table"] + ".info")
with open(info_filepath, "r") as info_file:
file_info["info"] = json.load(info_file)
return file_info
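# --- Illustrative sketch (not part of the original script; the path and helper name are made up). ---
# The importer expects the layout written by `rethinkdb export`: one directory per
# database holding "<table>.json" (or .csv) next to "<table>.info", so a hypothetical
# "dump/test/users.json" maps to db "test", table "users", format "json", with its
# metadata read from "dump/test/users.info".
def _example_import_info():
    info = get_import_info_for_file(os.path.join("dump", "test", "users.json"), [])
    # info["db"] == "test", info["table"] == "users", info["format"] == "json"
    return info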
def tables_check(progress, conn, files_info, force):
# Ensure that all needed databases exist and tables don't
db_list = r.db_list().run(conn)
for db in set([file_info["db"] for file_info in files_info]):
if db == "rethinkdb":
raise RuntimeError("Error: Cannot import tables into the system database: 'rethinkdb'")
if db not in db_list:
r.db_create(db).run(conn)
    # Ensure that all tables do not exist (unless --force is used)
already_exist = []
for file_info in files_info:
table = file_info["table"]
db = file_info["db"]
if table in r.db(db).table_list().run(conn):
if not force:
already_exist.append("%s.%s" % (db, table))
extant_pkey = r.db(db).table(table).info().run(conn)["primary_key"]
if file_info["info"]["primary_key"] != extant_pkey:
raise RuntimeError("Error: Table '%s.%s' already exists with a different primary key" % (db, table))
return already_exist
def import_directory(options):
# Scan for all files, make sure no duplicated tables with different formats
dbs = False
db_filter = set([db_table[0] for db_table in options["db_tables"]])
files_to_import = []
files_ignored = []
for root, dirs, files in os.walk(options["directory"]):
if not dbs:
files_ignored.extend([os.path.join(root, f) for f in files])
# The first iteration through should be the top-level directory, which contains the db folders
dbs = True
if len(db_filter) > 0:
for i in reversed(xrange(len(dirs))):
if dirs[i] not in db_filter:
del dirs[i]
else:
if len(dirs) != 0:
files_ignored.extend([os.path.join(root, d) for d in dirs])
del dirs[0:len(dirs)]
for f in files:
split_file = f.split(".")
if len(split_file) != 2 or split_file[1] not in ["json", "csv", "info"]:
files_ignored.append(os.path.join(root, f))
elif split_file[1] == "info":
pass # Info files are included based on the data files
elif not os.access(os.path.join(root, split_file[0] + ".info"), os.F_OK):
files_ignored.append(os.path.join(root, f))
else:
files_to_import.append(os.path.join(root, f))
# For each table to import collect: file, format, db, table, info
files_info = []
for filename in files_to_import:
res = get_import_info_for_file(filename, options["db_tables"])
if res is not None:
files_info.append(res)
# Ensure no two files are for the same db/table, and that all formats are recognized
db_tables = set()
for file_info in files_info:
if (file_info["db"], file_info["table"]) in db_tables:
raise RuntimeError("Error: Duplicate db.table found in directory tree: %s.%s" % (file_info["db"], file_info["table"]))
if file_info["format"] not in ["csv", "json"]:
raise RuntimeError("Error: Unrecognized format for file %s" % file_info["file"])
db_tables.add((file_info["db"], file_info["table"]))
conn_fn = lambda: r.connect(options["host"], options["port"], auth_key=options["auth_key"])
# Make sure this isn't a pre-`reql_admin` cluster - which could result in data loss
# if the user has a database named 'rethinkdb'
rdb_call_wrapper(conn_fn, "version check", check_minimum_version, (1, 16, 0))
already_exist = rdb_call_wrapper(conn_fn, "tables check", tables_check, files_info, options["force"])
if len(already_exist) == 1:
raise RuntimeError("Error: Table '%s' already exists, run with --force to import into the existing table" % already_exist[0])
elif len(already_exist) > 1:
already_exist.sort()
extant_tables = "\n ".join(already_exist)
raise RuntimeError("Error: The following tables already exist, run with --force to import into the existing tables:\n %s" % extant_tables)
# Warn the user about the files that were ignored
if len(files_ignored) > 0:
print("Unexpected files found in the specified directory. Importing a directory expects", file=sys.stderr)
print(" a directory from `rethinkdb export`. If you want to import individual tables", file=sys.stderr)
print(" import them as single files. The following files were ignored:", file=sys.stderr)
for f in files_ignored:
print("%s" % str(f), file=sys.stderr)
spawn_import_clients(options, files_info)
def table_check(progress, conn, db, table, pkey, force):
if db == "rethinkdb":
raise RuntimeError("Error: Cannot import a table into the system database: 'rethinkdb'")
if db not in r.db_list().run(conn):
r.db_create(db).run(conn)
if table in r.db(db).table_list().run(conn):
if not force:
raise RuntimeError("Error: Table already exists, run with --force if you want to import into the existing table")
extant_pkey = r.db(db).table(table).info().run(conn)["primary_key"]
if pkey is not None and pkey != extant_pkey:
raise RuntimeError("Error: Table already exists with a different primary key")
pkey = extant_pkey
else:
if pkey is None:
print("no primary key specified, using default primary key when creating table")
r.db(db).table_create(table).run(conn)
else:
r.db(db).table_create(table, primary_key=pkey).run(conn)
return pkey
def import_file(options):
db = options["import_db_table"][0]
table = options["import_db_table"][1]
pkey = options["primary_key"]
# Ensure that the database and table exist with the right primary key
conn_fn = lambda: r.connect(options["host"], options["port"], auth_key=options["auth_key"])
# Make sure this isn't a pre-`reql_admin` cluster - which could result in data loss
# if the user has a database named 'rethinkdb'
rdb_call_wrapper(conn_fn, "version check", check_minimum_version, (1, 16, 0))
pkey = rdb_call_wrapper(conn_fn, "table check", table_check, db, table, pkey, options["force"])
# Make this up so we can use the same interface as with an import directory
file_info = {}
file_info["file"] = options["import_file"]
file_info["format"] = options["import_format"]
file_info["db"] = db
file_info["table"] = table
file_info["info"] = {"primary_key": pkey, "indexes": []}
spawn_import_clients(options, [file_info])
def main():
try:
options = parse_options()
except RuntimeError as ex:
print("Usage:\n%s" % usage, file=sys.stderr)
print(ex, file=sys.stderr)
return 1
try:
start_time = time.time()
if "directory" in options:
import_directory(options)
elif "import_file" in options:
import_file(options)
else:
raise RuntimeError("Error: Neither --directory or --file specified")
except RuntimeError as ex:
print(ex, file=sys.stderr)
return 1
print(" Done (%d seconds)" % (time.time() - start_time))
return 0
if __name__ == "__main__":
exit(main())
| agpl-3.0 | 2,701,285,710,800,599,600 | 4,151,408,391,024,132,600 | 44.575898 | 156 | 0.596283 | false |
watonyweng/nova | nova/tests/unit/virt/hyperv/test_snapshotops.py | 67 | 5891 | # Copyright 2014 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import mock
from nova.compute import task_states
from nova.tests.unit import fake_instance
from nova.tests.unit.virt.hyperv import test_base
from nova.virt.hyperv import snapshotops
class SnapshotOpsTestCase(test_base.HyperVBaseTestCase):
"""Unit tests for the Hyper-V SnapshotOps class."""
def setUp(self):
super(SnapshotOpsTestCase, self).setUp()
self.context = 'fake_context'
self._snapshotops = snapshotops.SnapshotOps()
self._snapshotops._pathutils = mock.MagicMock()
self._snapshotops._vmutils = mock.MagicMock()
self._snapshotops._vhdutils = mock.MagicMock()
@mock.patch('nova.image.glance.get_remote_image_service')
def test_save_glance_image(self, mock_get_remote_image_service):
image_metadata = {"is_public": False,
"disk_format": "vhd",
"container_format": "bare",
"properties": {}}
glance_image_service = mock.MagicMock()
mock_get_remote_image_service.return_value = (glance_image_service,
mock.sentinel.IMAGE_ID)
self._snapshotops._save_glance_image(context=self.context,
image_id=mock.sentinel.IMAGE_ID,
image_vhd_path=mock.sentinel.PATH)
mock_get_remote_image_service.assert_called_once_with(
self.context, mock.sentinel.IMAGE_ID)
self._snapshotops._pathutils.open.assert_called_with(
mock.sentinel.PATH, 'rb')
glance_image_service.update.assert_called_once_with(
self.context, mock.sentinel.IMAGE_ID, image_metadata,
self._snapshotops._pathutils.open().__enter__())
@mock.patch('nova.virt.hyperv.snapshotops.SnapshotOps._save_glance_image')
def _test_snapshot(self, mock_save_glance_image, base_disk_path):
mock_instance = fake_instance.fake_instance_obj(self.context)
mock_update = mock.MagicMock()
fake_src_path = os.path.join('fake', 'path')
self._snapshotops._pathutils.lookup_root_vhd_path.return_value = (
fake_src_path)
fake_exp_dir = os.path.join(os.path.join('fake', 'exp'), 'dir')
self._snapshotops._pathutils.get_export_dir.return_value = fake_exp_dir
self._snapshotops._vhdutils.get_vhd_parent_path.return_value = (
base_disk_path)
fake_snapshot_path = (
self._snapshotops._vmutils.take_vm_snapshot.return_value)
self._snapshotops.snapshot(context=self.context,
instance=mock_instance,
image_id=mock.sentinel.IMAGE_ID,
update_task_state=mock_update)
self._snapshotops._vmutils.take_vm_snapshot.assert_called_once_with(
mock_instance.name)
mock_lookup_path = self._snapshotops._pathutils.lookup_root_vhd_path
mock_lookup_path.assert_called_once_with(mock_instance.name)
mock_get_vhd_path = self._snapshotops._vhdutils.get_vhd_parent_path
mock_get_vhd_path.assert_called_once_with(fake_src_path)
self._snapshotops._pathutils.get_export_dir.assert_called_once_with(
mock_instance.name)
expected = [mock.call(fake_src_path,
os.path.join(fake_exp_dir,
os.path.basename(fake_src_path)))]
dest_vhd_path = os.path.join(fake_exp_dir,
os.path.basename(fake_src_path))
if base_disk_path:
basename = os.path.basename(base_disk_path)
base_dest_disk_path = os.path.join(fake_exp_dir, basename)
expected.append(mock.call(base_disk_path, base_dest_disk_path))
mock_reconnect = self._snapshotops._vhdutils.reconnect_parent_vhd
mock_reconnect.assert_called_once_with(dest_vhd_path,
base_dest_disk_path)
self._snapshotops._vhdutils.merge_vhd.assert_called_once_with(
dest_vhd_path, base_dest_disk_path)
mock_save_glance_image.assert_called_once_with(
self.context, mock.sentinel.IMAGE_ID, base_dest_disk_path)
else:
mock_save_glance_image.assert_called_once_with(
self.context, mock.sentinel.IMAGE_ID, dest_vhd_path)
self._snapshotops._pathutils.copyfile.has_calls(expected)
expected_update = [
mock.call(task_state=task_states.IMAGE_PENDING_UPLOAD),
mock.call(task_state=task_states.IMAGE_UPLOADING,
expected_state=task_states.IMAGE_PENDING_UPLOAD)]
mock_update.has_calls(expected_update)
self._snapshotops._vmutils.remove_vm_snapshot.assert_called_once_with(
fake_snapshot_path)
self._snapshotops._pathutils.rmtree.assert_called_once_with(
fake_exp_dir)
def test_snapshot(self):
base_disk_path = os.path.join('fake', 'disk')
self._test_snapshot(base_disk_path=base_disk_path)
def test_snapshot_no_base_disk(self):
self._test_snapshot(base_disk_path=None)
| apache-2.0 | -3,423,641,745,289,531,400 | 1,895,584,435,742,673,700 | 47.68595 | 79 | 0.61874 | false |
darrengarvey/procfs-snapshot | parsers/smaps.py | 1 | 3160 | from model import MemoryRegion
import util
import re
def parse_smaps_header(header):
info = MemoryRegion(free=False)
# Example line is:
# 011e6000-01239000 rw-p 00000000 00:00 0 [heap]
# 8ec00000-8ec01000 rw-s 00000000 00:14 20 /dev/shm/NS2371 (deleted)
# All numbers are hex except for the inode
parts = header.split()
util.LOGGER.debug('Parsing smaps header %s' % header)
# Parse the address range
info.start_addr, info.end_addr = [long(x, 16) for x in parts[0].split('-')]
# Parse the permissions
permissions = parts[1]
info.permissions.readable = "r" in permissions
info.permissions.writable = "w" in permissions
info.permissions.executable = "x" in permissions
info.permissions.private = "p" in permissions
info.permissions.shared = "s" in permissions
info.offset = long(parts[2], 16)
# eg. 08:06
info.major_dev, info.minor_dev = [int(x, 16) for x in parts[3].split(':')]
# The inode isn't a hex number
info.inode = int(parts[4])
# eg. [heap]
# or /dev/shm/NS2371
if len(parts) > 5:
info.name = parts[5]
info.deleted = header.endswith('(deleted)')
return info
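# --- Illustrative sketch (not part of the original module; the helper name is made up). ---
# Parsing the example header from the comments above: the addresses, offset and
# device pair are hex, the inode is decimal, and the trailing name is kept verbatim.
def _example_parse_smaps_header():
    info = parse_smaps_header('011e6000-01239000 rw-p 00000000 00:00 0          [heap]')
    assert info.start_addr == 0x011e6000 and info.end_addr == 0x01239000
    assert info.permissions.readable and info.permissions.writable
    assert not info.permissions.shared and not info.deleted
    assert info.name == '[heap]'
    return info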
_header_re = re.compile('^[0-9a-zA-Z]+-[0-9a-zA-Z]+ .*')
def is_memory_region_header(line):
return re.match(_header_re, line)
def parse_smaps_memory_region(pid, lines, has_header=True):
"""Parse a whole smaps region, which may look like:
7f5c8550e000-7f5c85554000 r--p 00000000 08:06 1309629 /fonts/Arial_Bold.ttf
Size: 280 kB
Rss: 152 kB
Pss: 86 kB
Shared_Clean: 132 kB
Shared_Dirty: 12 kB
Private_Clean: 20 kB
Private_Dirty: 1 kB
Referenced: 152 kB
Anonymous: 2 kB
AnonHugePages: 3 kB
Shared_Hugetlb: 4 kB
Private_Hugetlb: 5 kB
Swap: 6 kB
SwapPss: 7 kB
KernelPageSize: 8 kB
MMUPageSize: 9 kB
Locked: 10 kB
VmFlags: rd mr mw me sd"""
has_header = is_memory_region_header(lines[0])
if has_header:
region = parse_smaps_header(lines[0])
if region.name == '[vsyscall]':
return None
lines = lines[1:]
else:
region = MemoryRegion(free=False)
region.pid = pid
global _smaps_string_mappings
for line in lines:
util.LOGGER.debug('Parsing line: %s' % line)
parts = re.split('[ :]+', line.strip())
if len(parts) < 2:
util.LOGGER.debug('Skipping smaps line that is too short: %s' % line)
elif 'Size' == parts[0]:
# We calculate the size from the address ranges instead.
pass
elif 'VmFlags' == parts[0]:
region.vm_flags = parts[1:]
else:
# All other lines should be an amount of some type of memory.
try:
region.__dict__[util.camel_case_to_underscore(parts[0])] = int(parts[1]) * 1024
except KeyError:
util.LOGGER.warn("Line not recognised: '%s'" % line)
return region
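# --- Illustrative sketch (not part of the original module; the helper name is made up and it
# assumes util.camel_case_to_underscore('Rss') yields 'rss'). ---
# Feeding a trimmed-down region from the docstring through the parser: the header row
# supplies the mapping details and each "<Field>: <n> kB" row becomes an attribute in bytes.
def _example_parse_smaps_memory_region():
    lines = ['7f5c8550e000-7f5c85554000 r--p 00000000 08:06 1309629   /fonts/Arial_Bold.ttf',
             'Rss:                 152 kB',
             'VmFlags: rd mr mw me sd']
    region = parse_smaps_memory_region(1234, lines)
    assert region.pid == 1234
    assert region.rss == 152 * 1024
    assert region.vm_flags == ['rd', 'mr', 'mw', 'me', 'sd']
    return region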
| apache-2.0 | 2,052,549,973,243,894,300 | -3,608,430,993,724,301,000 | 31.244898 | 95 | 0.575633 | false |
flijloku/livestreamer | src/livestreamer_cli/utils/player.py | 23 | 1244 | import os
import sys
from ..compat import shlex_quote
def check_paths(exes, paths):
for path in paths:
for exe in exes:
path = os.path.expanduser(os.path.join(path, exe))
if os.path.isfile(path):
return path
def find_default_player():
if "darwin" in sys.platform:
paths = os.environ.get("PATH", "").split(":")
paths += ["/Applications/VLC.app/Contents/MacOS/"]
paths += ["~/Applications/VLC.app/Contents/MacOS/"]
path = check_paths(("VLC", "vlc"), paths)
elif "win32" in sys.platform:
exename = "vlc.exe"
paths = os.environ.get("PATH", "").split(";")
path = check_paths((exename,), paths)
if not path:
subpath = "VideoLAN\\VLC\\"
envvars = ("PROGRAMFILES", "PROGRAMFILES(X86)", "PROGRAMW6432")
paths = filter(None, (os.environ.get(var) for var in envvars))
paths = (os.path.join(p, subpath) for p in paths)
path = check_paths((exename,), paths)
else:
paths = os.environ.get("PATH", "").split(":")
path = check_paths(("vlc",), paths)
if path:
# Quote command because it can contain space
return shlex_quote(path)
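# --- Illustrative sketch (not part of the original module; the helper name is made up). ---
# find_default_player() returns a shell-quoted VLC path or None, so callers can fall
# back to an explicitly configured player command.
def _example_player_command(configured_player=None):
    return configured_player or find_default_player()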
| bsd-2-clause | 8,884,603,674,525,266,000 | 3,792,021,041,409,362,000 | 30.897436 | 75 | 0.561897 | false |
dmarteau/QGIS | python/plugins/db_manager/db_plugins/postgis/plugin.py | 25 | 17115 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
Name : DB Manager
Description : Database manager plugin for QGIS
Date : May 23, 2011
copyright : (C) 2011 by Giuseppe Sucameli
email : [email protected]
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from builtins import str
from builtins import map
from builtins import range
# this will disable the dbplugin if the connector raise an ImportError
from .connector import PostGisDBConnector
from qgis.PyQt.QtCore import Qt, QRegExp, QCoreApplication
from qgis.PyQt.QtGui import QIcon
from qgis.PyQt.QtWidgets import QAction, QApplication, QMessageBox
from qgis.core import Qgis, QgsApplication, QgsSettings
from qgis.gui import QgsMessageBar
from ..plugin import ConnectionError, InvalidDataException, DBPlugin, Database, Schema, Table, VectorTable, RasterTable, \
TableField, TableConstraint, TableIndex, TableTrigger, TableRule
import re
def classFactory():
return PostGisDBPlugin
class PostGisDBPlugin(DBPlugin):
@classmethod
def icon(self):
return QgsApplication.getThemeIcon("/mIconPostgis.svg")
@classmethod
def typeName(self):
return 'postgis'
@classmethod
def typeNameString(self):
return QCoreApplication.translate('db_manager', 'PostGIS')
@classmethod
def providerName(self):
return 'postgres'
@classmethod
def connectionSettingsKey(self):
return '/PostgreSQL/connections'
def databasesFactory(self, connection, uri):
return PGDatabase(connection, uri)
def connect(self, parent=None):
conn_name = self.connectionName()
settings = QgsSettings()
settings.beginGroup(u"/%s/%s" % (self.connectionSettingsKey(), conn_name))
if not settings.contains("database"): # non-existent entry?
raise InvalidDataException(self.tr('There is no defined database connection "{0}".').format(conn_name))
from qgis.core import QgsDataSourceUri
uri = QgsDataSourceUri()
settingsList = ["service", "host", "port", "database", "username", "password", "authcfg"]
service, host, port, database, username, password, authcfg = [settings.value(x, "", type=str) for x in settingsList]
useEstimatedMetadata = settings.value("estimatedMetadata", False, type=bool)
try:
sslmode = settings.enumValue("sslmode", QgsDataSourceUri.SslPrefer)
except TypeError:
sslmode = QgsDataSourceUri.SslPrefer
settings.endGroup()
if hasattr(authcfg, 'isNull') and authcfg.isNull():
authcfg = ''
if service:
uri.setConnection(service, database, username, password, sslmode, authcfg)
else:
uri.setConnection(host, port, database, username, password, sslmode, authcfg)
uri.setUseEstimatedMetadata(useEstimatedMetadata)
try:
return self.connectToUri(uri)
except ConnectionError:
return False
class PGDatabase(Database):
def __init__(self, connection, uri):
Database.__init__(self, connection, uri)
def connectorsFactory(self, uri):
return PostGisDBConnector(uri, self.connection())
def dataTablesFactory(self, row, db, schema=None):
return PGTable(row, db, schema)
def info(self):
from .info_model import PGDatabaseInfo
return PGDatabaseInfo(self)
def vectorTablesFactory(self, row, db, schema=None):
return PGVectorTable(row, db, schema)
def rasterTablesFactory(self, row, db, schema=None):
return PGRasterTable(row, db, schema)
def schemasFactory(self, row, db):
return PGSchema(row, db)
def sqlResultModel(self, sql, parent):
from .data_model import PGSqlResultModel
return PGSqlResultModel(self, sql, parent)
def sqlResultModelAsync(self, sql, parent):
from .data_model import PGSqlResultModelAsync
return PGSqlResultModelAsync(self, sql, parent)
def registerDatabaseActions(self, mainWindow):
Database.registerDatabaseActions(self, mainWindow)
# add a separator
separator = QAction(self)
separator.setSeparator(True)
mainWindow.registerAction(separator, self.tr("&Table"))
action = QAction(self.tr("Run &Vacuum Analyze"), self)
mainWindow.registerAction(action, self.tr("&Table"), self.runVacuumAnalyzeActionSlot)
action = QAction(self.tr("Run &Refresh Materialized View"), self)
mainWindow.registerAction(action, self.tr("&Table"), self.runRefreshMaterializedViewSlot)
def runVacuumAnalyzeActionSlot(self, item, action, parent):
QApplication.restoreOverrideCursor()
try:
if not isinstance(item, Table) or item.isView:
parent.infoBar.pushMessage(self.tr("Select a table for vacuum analyze."), Qgis.Info,
parent.iface.messageTimeout())
return
finally:
QApplication.setOverrideCursor(Qt.WaitCursor)
item.runVacuumAnalyze()
def runRefreshMaterializedViewSlot(self, item, action, parent):
QApplication.restoreOverrideCursor()
try:
if not isinstance(item, PGTable) or item._relationType != 'm':
parent.infoBar.pushMessage(self.tr("Select a materialized view for refresh."), Qgis.Info,
parent.iface.messageTimeout())
return
finally:
QApplication.setOverrideCursor(Qt.WaitCursor)
item.runRefreshMaterializedView()
def hasLowercaseFieldNamesOption(self):
return True
def supportsComment(self):
return True
def executeSql(self, sql):
return self.connector._executeSql(sql)
class PGSchema(Schema):
def __init__(self, row, db):
Schema.__init__(self, db)
self.oid, self.name, self.owner, self.perms, self.comment = row
class PGTable(Table):
def __init__(self, row, db, schema=None):
Table.__init__(self, db, schema)
self.name, schema_name, self._relationType, self.owner, self.estimatedRowCount, self.pages, self.comment = row
self.isView = self._relationType in set(['v', 'm'])
self.estimatedRowCount = int(self.estimatedRowCount)
def runVacuumAnalyze(self):
self.aboutToChange.emit()
self.database().connector.runVacuumAnalyze((self.schemaName(), self.name))
# TODO: change only this item, not re-create all the tables in the schema/database
self.schema().refresh() if self.schema() else self.database().refresh()
def runRefreshMaterializedView(self):
self.aboutToChange.emit()
self.database().connector.runRefreshMaterializedView((self.schemaName(), self.name))
# TODO: change only this item, not re-create all the tables in the schema/database
self.schema().refresh() if self.schema() else self.database().refresh()
def runAction(self, action):
action = str(action)
if action.startswith("vacuumanalyze/"):
if action == "vacuumanalyze/run":
self.runVacuumAnalyze()
return True
elif action.startswith("rule/"):
parts = action.split('/')
rule_name = parts[1]
rule_action = parts[2]
msg = self.tr(u"Do you want to {0} rule {1}?").format(rule_action, rule_name)
QApplication.restoreOverrideCursor()
try:
if QMessageBox.question(None, self.tr("Table rule"), msg,
QMessageBox.Yes | QMessageBox.No) == QMessageBox.No:
return False
finally:
QApplication.setOverrideCursor(Qt.WaitCursor)
if rule_action == "delete":
self.aboutToChange.emit()
self.database().connector.deleteTableRule(rule_name, (self.schemaName(), self.name))
self.refreshRules()
return True
elif action.startswith("refreshmaterializedview/"):
if action == "refreshmaterializedview/run":
self.runRefreshMaterializedView()
return True
return Table.runAction(self, action)
def tableFieldsFactory(self, row, table):
return PGTableField(row, table)
def tableConstraintsFactory(self, row, table):
return PGTableConstraint(row, table)
def tableIndexesFactory(self, row, table):
return PGTableIndex(row, table)
def tableTriggersFactory(self, row, table):
return PGTableTrigger(row, table)
def tableRulesFactory(self, row, table):
return PGTableRule(row, table)
def info(self):
from .info_model import PGTableInfo
return PGTableInfo(self)
def crs(self):
return self.database().connector.getCrs(self.srid)
def tableDataModel(self, parent):
from .data_model import PGTableDataModel
return PGTableDataModel(self, parent)
def delete(self):
self.aboutToChange.emit()
if self.isView:
ret = self.database().connector.deleteView((self.schemaName(), self.name), self._relationType == 'm')
else:
ret = self.database().connector.deleteTable((self.schemaName(), self.name))
if not ret:
self.deleted.emit()
return ret
class PGVectorTable(PGTable, VectorTable):
def __init__(self, row, db, schema=None):
PGTable.__init__(self, row[:-4], db, schema)
VectorTable.__init__(self, db, schema)
self.geomColumn, self.geomType, self.geomDim, self.srid = row[-4:]
def info(self):
from .info_model import PGVectorTableInfo
return PGVectorTableInfo(self)
def runAction(self, action):
if PGTable.runAction(self, action):
return True
return VectorTable.runAction(self, action)
class PGRasterTable(PGTable, RasterTable):
def __init__(self, row, db, schema=None):
PGTable.__init__(self, row[:-6], db, schema)
RasterTable.__init__(self, db, schema)
self.geomColumn, self.pixelType, self.pixelSizeX, self.pixelSizeY, self.isExternal, self.srid = row[-6:]
self.geomType = 'RASTER'
def info(self):
from .info_model import PGRasterTableInfo
return PGRasterTableInfo(self)
def uri(self, uri=None):
"""Returns the datasource URI for postgresraster provider"""
if not uri:
uri = self.database().uri()
service = (u'service=\'%s\'' % uri.service()) if uri.service() else ''
dbname = (u'dbname=\'%s\'' % uri.database()) if uri.database() else ''
host = (u'host=%s' % uri.host()) if uri.host() else ''
user = (u'user=%s' % uri.username()) if uri.username() else ''
passw = (u'password=%s' % uri.password()) if uri.password() else ''
port = (u'port=%s' % uri.port()) if uri.port() else ''
schema = self.schemaName() if self.schemaName() else 'public'
table = '"%s"."%s"' % (schema, self.name)
if not dbname:
# postgresraster provider *requires* a dbname
connector = self.database().connector
r = connector._execute(None, "SELECT current_database()")
dbname = (u'dbname=\'%s\'' % connector._fetchone(r)[0])
connector._close_cursor(r)
# Find first raster field
col = ''
for fld in self.fields():
if fld.dataType == "raster":
col = u'column=\'%s\'' % fld.name
break
uri = u'%s %s %s %s %s %s %s table=%s' % \
(service, dbname, host, user, passw, port, col, table)
return uri
def mimeUri(self):
uri = u"raster:postgresraster:{}:{}".format(self.name, re.sub(":", r"\:", self.uri()))
return uri
def toMapLayer(self):
from qgis.core import QgsRasterLayer, QgsContrastEnhancement, QgsDataSourceUri, QgsCredentials
rl = QgsRasterLayer(self.uri(), self.name, "postgresraster")
if not rl.isValid():
err = rl.error().summary()
uri = QgsDataSourceUri(self.database().uri())
conninfo = uri.connectionInfo(False)
username = uri.username()
password = uri.password()
for i in range(3):
(ok, username, password) = QgsCredentials.instance().get(conninfo, username, password, err)
if ok:
uri.setUsername(username)
uri.setPassword(password)
rl = QgsRasterLayer(self.uri(uri), self.name)
if rl.isValid():
break
if rl.isValid():
rl.setContrastEnhancement(QgsContrastEnhancement.StretchToMinimumMaximum)
return rl
class PGTableField(TableField):
def __init__(self, row, table):
TableField.__init__(self, table)
self.num, self.name, self.dataType, self.charMaxLen, self.modifier, self.notNull, self.hasDefault, self.default, typeStr = row
self.primaryKey = False
# get modifier (e.g. "precision,scale") from formatted type string
trimmedTypeStr = typeStr.strip()
regex = QRegExp("\\((.+)\\)$")
startpos = regex.indexIn(trimmedTypeStr)
if startpos >= 0:
self.modifier = regex.cap(1).strip()
else:
self.modifier = None
# find out whether fields are part of primary key
for con in self.table().constraints():
if con.type == TableConstraint.TypePrimaryKey and self.num in con.columns:
self.primaryKey = True
break
def getComment(self):
"""Returns the comment for a field"""
tab = self.table()
# SQL Query checking if a comment exists for the field
sql_cpt = "Select count(*) from pg_description pd, pg_class pc, pg_attribute pa where relname = '%s' and attname = '%s' and pa.attrelid = pc.oid and pd.objoid = pc.oid and pd.objsubid = pa.attnum" % (tab.name, self.name)
# SQL Query that return the comment of the field
sql = "Select pd.description from pg_description pd, pg_class pc, pg_attribute pa where relname = '%s' and attname = '%s' and pa.attrelid = pc.oid and pd.objoid = pc.oid and pd.objsubid = pa.attnum" % (tab.name, self.name)
c = tab.database().connector._execute(None, sql_cpt) # Execute Check query
res = tab.database().connector._fetchone(c)[0] # Store result
if res == 1:
# When a comment exists
c = tab.database().connector._execute(None, sql) # Execute query
res = tab.database().connector._fetchone(c)[0] # Store result
tab.database().connector._close_cursor(c) # Close cursor
return res # Return comment
else:
return ''
class PGTableConstraint(TableConstraint):
def __init__(self, row, table):
TableConstraint.__init__(self, table)
self.name, constr_type_str, self.isDefferable, self.isDeffered, columns = row[:5]
self.columns = list(map(int, columns.split(' ')))
if constr_type_str in TableConstraint.types:
self.type = TableConstraint.types[constr_type_str]
else:
self.type = TableConstraint.TypeUnknown
if self.type == TableConstraint.TypeCheck:
self.checkSource = row[5]
elif self.type == TableConstraint.TypeForeignKey:
self.foreignTable = row[6]
self.foreignOnUpdate = TableConstraint.onAction[row[7]]
self.foreignOnDelete = TableConstraint.onAction[row[8]]
self.foreignMatchType = TableConstraint.matchTypes[row[9]]
self.foreignKeys = row[10]
class PGTableIndex(TableIndex):
def __init__(self, row, table):
TableIndex.__init__(self, table)
self.name, columns, self.isUnique = row
self.columns = list(map(int, columns.split(' ')))
class PGTableTrigger(TableTrigger):
def __init__(self, row, table):
TableTrigger.__init__(self, table)
self.name, self.function, self.type, self.enabled = row
class PGTableRule(TableRule):
def __init__(self, row, table):
TableRule.__init__(self, table)
self.name, self.definition = row
| gpl-2.0 | 9,173,885,539,157,001,000 | -9,148,424,896,610,745,000 | 35.414894 | 230 | 0.604499 | false |
raygeeknyc/ohgee | visionanalyzer.py | 1 | 19670 | #!/usr/bin/python3
import logging
# Used only if this is run as main
_DEBUG = logging.DEBUG
SENTIMENT_CONFIDENCE_THRESHOLD = 0.25
GOOD_SENTIMENT_THRESHOLD = SENTIMENT_CONFIDENCE_THRESHOLD
BAD_SENTIMENT_THRESHOLD = -1*SENTIMENT_CONFIDENCE_THRESHOLD
# Import the packages we need for drawing and displaying images
from PIL import Image, ImageDraw
# Imports the Google Cloud client packages we need
from google.cloud import vision
# Enumerate the likelihood names that are defined by Cloud Vision 1
LIKELIHOOD_NAMES = {'UNKNOWN':0, 'VERY_UNLIKELY':1, 'UNLIKELY':2, 'POSSIBLE':3,
'LIKELY':4, 'VERY_LIKELY':5}
from picamera import PiCamera
import multiprocessing
from multiprocessingloghandler import ParentMultiProcessingLogHandler
from multiprocessingloghandler import ChildMultiProcessingLogHandler
from random import randint
import io
import sys
import os
import time
import signal
import queue
import threading
# This is the desired resolution of the Pi camera
RESOLUTION = (600, 400)
CAPTURE_RATE_FPS = 2
# This is set just above the pixel-change noise observed from a covered camera
TRAINING_SAMPLES = 5
# This is how much the green channel has to change to consider a pixel changed
PIXEL_SHIFT_SENSITIVITY = 30
# This is the portion of pixels to compare when detecting motion
MOTION_DETECT_SAMPLE = 1.0/20 # so... 5%? (Kudos to Sarah Cooper)
# This is how long to sleep in various threads between shutdown checks
POLL_SECS = 0.1
# This is the rate at which to send frames to the vision service
ANALYSIS_RATE_FPS = 1
_ANALYSIS_DELAY_SECS = 1.0/ANALYSIS_RATE_FPS
COLOR_MEH = (0, 0, 127)
COLOR_BAD = (200, 0, 0)
COLOR_GOOD = (0, 200, 0)
COLOR_FEATURES = (255,255,255)
def signal_handler(sig, frame):
global STOP
if STOP:
signal.signal(signal.SIGINT, signal.SIG_IGN)
os.kill(os.getpid(), signal.SIGTERM)
logging.debug("SIGINT")
STOP = True
signal.signal(signal.SIGINT, signal_handler)
EMPTY_LABELS = []
BAD_MOOD_GREETINGS = (["don't", "worry", "be", "happy"], ["I'm", "sorry", "that", "you're", "not", "feeling", "happy"], ["You", "look", "down"], ["I", "hope", "that", "I", "can", "cheer", "you", "up"], ["I", "hope", "that", "you", "feel", "better", "soon"], ["Smile!"])
GOOD_MOOD_GREETINGS = (["I'm", "glad", "that", "you", "are", "happy"], ["You", "look", "happy"], ["You", "cheer", "me", "up"], ["It's", "great", "to", "see", "you", "happy"], ["Great", "day"])
DOG_LABELS = ["dog", "canine"]
DOG_GREETINGS = (["here", "doggie"], ["hi","puppy"], ["hello", "puppy"], ["woof", "woof"], ["bark", "bark"], ["good", "puppy"], ["good", "puppy"], ["nice", "doggie"])
CAT_LABELS = ["cat", "feline"]
CAT_GREETINGS = (["meow"], ["meow", "meow"], ["nice", "kitty"], ["what", "a", "nice", "cat"])
HAT_LABELS = ["hat", "cap", "headgear"]
HAT_GREETINGS = (["that's", "a", "nice", "hat"], ["nice", "hat"], ["nice", "cap"], ["I", "like", "your", "hat"])
COFFEE_LABELS = ["espresso", "cup", "mug", "coffee", "coffee cup", "drinkware"]
COFFEE_GREETINGS = (["is", "that", "a", "cup", "of", "good", "coffee"], ["I", "love", "coffee", "too"], ["I", "hope", "that", "you", "enjoy", "your", "coffee"])
EYEGLASS_LABELS = ["glasses", "eyewear"]
EYEGLASS_GREETINGS = (["those", "are", "nice", "eye", "glasses"], ["I", "like", "your", "glasses"], ["nice", "glasses"], ["nice", "eye", "glasses"], [], [], [], [])
FLOWER_LABELS = ["flowers", "flower", "floral"]
FLOWER_GREETINGS = (["what", "a", "pretty", "flower"], ["nice", "flowers"], [])
# Only the first label found in tags will be used, so prioritize them in this list
LABELS_GREETINGS = [(DOG_LABELS, DOG_GREETINGS, EMPTY_LABELS, True),
(CAT_LABELS, CAT_GREETINGS, EMPTY_LABELS, False),
(HAT_LABELS, HAT_GREETINGS, EMPTY_LABELS, False),
(FLOWER_LABELS, FLOWER_GREETINGS, EMPTY_LABELS, False),
(COFFEE_LABELS, COFFEE_GREETINGS, EMPTY_LABELS, False),
(EYEGLASS_LABELS, EYEGLASS_GREETINGS, EMPTY_LABELS, False)]
def randomGreetingFrom(phrases):
if not phrases: return []
return phrases[randint(0,len(phrases)-1)]
def getBadMoodGreeting():
return (randomGreetingFrom(BAD_MOOD_GREETINGS), False)
def getGoodMoodGreeting():
return (randomGreetingFrom(GOOD_MOOD_GREETINGS), False)
# Return a greeting for the first tag set that matches one of the labels
# and has no match in its excludes list
def getGreetingForLabels(labels):
for tags, greetings, excludes, wave_flag in LABELS_GREETINGS:
for label in labels:
logging.debug("label: {}".format(label))
matched_label_text = labelMatch(labels, tags)
if matched_label_text:
matched_exclude = labelMatch(labels, excludes)
if not matched_exclude:
return (randomGreetingFrom(greetings), wave_flag, tags[0])
return None
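# --- Illustrative sketch (not part of the original script; the helper name is made up). ---
# Greetings are priority ordered by LABELS_GREETINGS, so a frame labelled with both
# "hat" and "dog" is greeted as a dog, and only the dog entry sets the wave flag.
def _example_greeting_for_labels():
    greeting, wave_flag, tag = getGreetingForLabels(["hat", "dog"])
    assert tag == "dog" and wave_flag is True
    return greeting  # a random phrase from DOG_GREETINGS, e.g. ["woof", "woof"]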
def labelMatch(labels,tags):
for candidate_label in labels:
if candidate_label in tags:
return candidate_label
return None
# Sentiment is -1, 0 or +1 for this sentiment and level
# -1 == bad, 0 == meh, +1 == good
def getSentimentForLevel(face, level):
if face.joy_likelihood == level or face.surprise_likelihood == level:
logging.debug("getSentimentForLevel: %s joy: %s surprise: %s" % (str(level), str(face.joy_likelihood), str(face.surprise_likelihood)))
return 1.0
if face.anger_likelihood == level or face.sorrow_likelihood == level:
logging.debug("getSentimentForLevel: %s anger: %s sorrow: %s" % (str(level), str(face.anger_likelihood), str(face.sorrow_likelihood)))
return -1.0
return 0.0
def getSentimentWeightedByLevel(face):
logging.debug("joy: {}, surprise:{}, anger:{}, sorrow:{}".format(
face.joy_likelihood, face.surprise_likelihood, face.anger_likelihood, face.sorrow_likelihood))
sentiment = getSentimentForLevel(face, LIKELIHOOD_NAMES['VERY_LIKELY'])
if sentiment != 0:
return sentiment
sentiment = getSentimentForLevel(face, LIKELIHOOD_NAMES['LIKELY'])
if sentiment != 0:
return sentiment * SENTIMENT_CONFIDENCE_THRESHOLD
sentiment = getSentimentForLevel(face, LIKELIHOOD_NAMES['POSSIBLE'])
if sentiment != 0:
return sentiment * SENTIMENT_CONFIDENCE_THRESHOLD
sentiment = getSentimentForLevel(face, LIKELIHOOD_NAMES['UNLIKELY'])
if sentiment != 0:
return sentiment * 0.25
return 0.0
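# --- Illustrative sketch (not part of the original script; _FakeFace is a made-up
# stand-in for a Cloud Vision face annotation). ---
# The weighting collapses likelihood levels into a signed score: VERY_LIKELY joy or
# surprise gives +1.0, VERY_LIKELY anger or sorrow gives -1.0, LIKELY/POSSIBLE give
# +/-SENTIMENT_CONFIDENCE_THRESHOLD, UNLIKELY gives +/-0.25, anything else 0.0.
class _FakeFace(object):
    def __init__(self, joy, surprise, anger, sorrow):
        self.joy_likelihood = joy
        self.surprise_likelihood = surprise
        self.anger_likelihood = anger
        self.sorrow_likelihood = sorrow
def _example_weighted_sentiment():
    happy = _FakeFace(LIKELIHOOD_NAMES['VERY_LIKELY'], LIKELIHOOD_NAMES['UNKNOWN'],
                      LIKELIHOOD_NAMES['VERY_UNLIKELY'], LIKELIHOOD_NAMES['VERY_UNLIKELY'])
    glum = _FakeFace(LIKELIHOOD_NAMES['VERY_UNLIKELY'], LIKELIHOOD_NAMES['VERY_UNLIKELY'],
                     LIKELIHOOD_NAMES['UNKNOWN'], LIKELIHOOD_NAMES['LIKELY'])
    assert getSentimentWeightedByLevel(happy) == 1.0
    assert getSentimentWeightedByLevel(glum) == -SENTIMENT_CONFIDENCE_THRESHOLD
    return happy, glum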
class ImageAnalyzer(multiprocessing.Process):
def __init__(self, vision_queue, log_queue, logging_level):
multiprocessing.Process.__init__(self)
self._log_queue = log_queue
self._logging_level = logging_level
self._exit = multiprocessing.Event()
self._vision_queue, _ = vision_queue
self._stop_capturing = False
self._stop_analyzing = False
self._last_frame_at = 0.0
self._frame_delay_secs = 1.0/CAPTURE_RATE_FPS
def stop(self):
logging.debug("***analysis received shutdown")
self._exit.set()
def _initLogging(self):
handler = ChildMultiProcessingLogHandler(self._log_queue)
logging.getLogger(str(os.getpid())).addHandler(handler)
logging.getLogger(str(os.getpid())).setLevel(self._logging_level)
def capturePilFrame(self):
s=time.time()
self._image_buffer.seek(0)
self._camera.capture(self._image_buffer, format="jpeg", use_video_port=True)
self._image_buffer.seek(0)
image = Image.open(self._image_buffer)
image_pixels = image.load()
image = self._image_buffer.getvalue()
self._last_frame_at = time.time()
logging.debug("capturePilFrame took {}".format(time.time()-s))
return (image, image_pixels)
def getNextFrame(self):
delay = (self._last_frame_at + self._frame_delay_secs) - time.time()
if delay > 0:
time.sleep(delay)
self._current_frame = self.capturePilFrame()
def calculateImageDifference(self, change_threshold=None, sample_percentage=MOTION_DETECT_SAMPLE):
"""
Detect changes in the green channel.
Sample sample_percentage of pixels, evenly distributed throughout
the image's pixel map.
If change_threshold is specified, exit once it's reached.
"""
s = time.time()
changed_pixels = 0
sample_size = sample_percentage * self._camera.resolution[0] * self._camera.resolution[1]
step_size = self._camera.resolution[0] * self._camera.resolution[1] / sample_size
# We choose the "most square" sampling interval to avoid sampling one or few stripes
if self._camera.resolution[0] < self._camera.resolution[1]:
y_step = int(sample_size / self._camera.resolution[0])
x_step = 1
else:
x_step = int(sample_size / self._camera.resolution[0])
y_step = 1
logging.debug("Motion threshold, pct, size, step_size, x_step, y_step: {},{},{},{},{},{}".format(change_threshold, sample_percentage, sample_size, step_size, x_step, y_step))
samples = 0
for x in range(0, self._camera.resolution[0], x_step):
for y in range(0, self._camera.resolution[1], y_step):
samples += 1
if abs(self._current_frame[1][x,y][1] - self._prev_frame[1][x,y][1]) > PIXEL_SHIFT_SENSITIVITY:
changed_pixels += 1
if change_threshold and changed_pixels > change_threshold:
logging.debug("reached threshold: {}, {} secs".format(changed_pixels, time.time()-s))
return changed_pixels
logging.debug("calculated change: {}, {} secs".format(changed_pixels, time.time()-s))
return changed_pixels
def imageDifferenceOverThreshold(self, changed_pixels_threshold):
"Are there more changed pixels than we've established as a lower bound for motion?"
changed_pixels = self.calculateImageDifference(changed_pixels_threshold)
return changed_pixels > changed_pixels_threshold
def trainMotion(self):
logging.debug("Training motion")
trained = False
try:
self._camera.start_preview(fullscreen=False, window=(100,100,self._camera.resolution[0], self._camera.resolution[1]))
self._motion_threshold = 9999
self.getNextFrame()
for i in range(TRAINING_SAMPLES):
self._prev_frame = self._current_frame
self.getNextFrame()
motion = self.calculateImageDifference()
self._motion_threshold = min(motion, self._motion_threshold)
trained = True
finally:
self._camera.stop_preview()
logging.debug("Trained {}".format(trained))
return trained
def run(self):
self._initLogging()
try:
self._frames = queue.Queue()
self._stop_capturing = False
self._stop_analyzing = False
self._capturer = threading.Thread(target=self.captureFrames)
self._capturer.start()
self._analyzer = threading.Thread(target=self.analyzeVision)
self._analyzer.start()
while not self._exit.is_set():
time.sleep(POLL_SECS)
logging.debug("Shutting down threads")
self._stop_capturing = True
self._capturer.join()
self._stop_analyzing = True
self._analyzer.join()
except Exception:
logging.exception("Error in vision main thread")
finally:
logging.debug("Exiting vision")
sys.exit(0)
def analyzeVision(self):
self._vision_client = vision.ImageAnnotatorClient()
skipped_images = 0
frame = None
while not self._stop_analyzing:
try:
frame = self._frames.get(block=False)
skipped_images += 1
except queue.Empty:
if not frame:
logging.debug("Empty image queue, waiting")
skipped_images = 0
time.sleep(POLL_SECS)
else:
skipped_images -= 1
logging.debug("Trailing frame read, skipped {} frames".format(skipped_images))
try:
results = self._analyzeFrame(frame)
buffer = io.BytesIO()
results[0].save(buffer, format="JPEG")
buffer.seek(0)
img_bytes = buffer.getvalue()
logging.debug("send image %s" % type(img_bytes))
self._vision_queue.send((img_bytes, results[1], results[2], results[3], results[4]))
except Exception:
logging.exception("error reading image")
finally:
frame = None
self._vision_queue.close()
logging.debug("Exiting vision analyze thread")
def _analyzeFrame(self, frame):
s=time.time()
logging.debug("analyzing image")
remote_image = vision.Image(content=frame[0])
labels = self._vision_client.label_detection(image=remote_image).label_annotations
faces = self._vision_client.face_detection(image=remote_image, image_context=None,
max_results=2).face_annotations
faces_details = findFacesDetails(faces)
im = Image.open(io.BytesIO(frame[0]))
size = im.size[0] * im.size[1]
canvas = ImageDraw.Draw(im)
obscureFacesWithSentiments(canvas, faces_details)
strongest_sentiment = 0.0
max_confidence = 0.0
max_area = 0.0
for face_detail in faces_details:
if face_detail[3] > max_area:
max_area = face_detail[3]
if face_detail[2] > max_confidence:
max_confidence = face_detail[2]
strongest_sentiment = face_detail[0]
logging.debug("sentiment:{}".format(strongest_sentiment))
logging.debug("_analyzeFrame took {}".format(time.time()-s))
max_area_portion = (max_area * 1.0) / size
label_descriptions = [label.description for label in labels]
return (im, label_descriptions, faces_details, strongest_sentiment, max_area_portion)
def captureFrames(self):
self._image_buffer = io.BytesIO()
self._camera = PiCamera()
self._camera.resolution = RESOLUTION
self._camera.vflip = True
prev_array = None
logging.info("Training motion detection")
for retry in range(3):
if self.trainMotion():
break
logging.info("Trained motion detection {}".format(self._motion_threshold))
while not self._stop_capturing:
try:
self.getNextFrame()
if self.imageDifferenceOverThreshold(self._motion_threshold):
logging.debug("Motion detected")
self._frames.put(self._current_frame)
self._prev_frame = self._current_frame
self.getNextFrame()
except Exception as e:
logging.error("Error in analysis: {}".format(e))
logging.debug("Exiting vision capture thread")
self._camera.close()
def findFacesDetails(faces):
faces_details = []
if faces:
for face in faces:
top = 9999
left = 9999
bottom = 0
right = 0
for point in face.bounding_poly.vertices:
top = min(top, point.y)
left = min(left, point.x)
bottom = max(bottom, point.y)
right = max(right, point.x)
sentiment = getSentimentWeightedByLevel(face)
area = abs(bottom - top) * abs(right - left)
faces_details.append((sentiment, ((left, top), (right, bottom)), face.detection_confidence, area))
return faces_details
def getColorForSentiment(sentiment):
if sentiment < 0:
return COLOR_BAD
if sentiment > 0:
return COLOR_GOOD
return COLOR_MEH
def watchForResults(vision_results_queue):
global STOP
_, incoming_results = vision_results_queue
try:
while True:
image, labels, faces_details, sentiment, max_area_portion = incoming_results.recv()
logging.debug("{} faces detected".format(len(faces_details)))
for label in labels:
logging.debug("label: {}".format(label))
except EOFError:
logging.debug("Done watching")
def obscureFacesWithSentiments(canvas, faces_details):
for face_sentiment, face_boundary, _, _ in faces_details:
sentiment_color = getColorForSentiment(face_sentiment)
canvas.ellipse(face_boundary, fill=sentiment_color, outline=None)
eye_size = max(1, (face_boundary[1][0] - face_boundary[0][0]) / 50)
nose_size = 2*eye_size
eye_level = face_boundary[0][1] + (face_boundary[1][1] - face_boundary[0][1])/3.0
nose_level = face_boundary[0][1] + (face_boundary[1][1] - face_boundary[0][1])/2.0
mouth_size_h = (face_boundary[1][0] - face_boundary[0][0])/2.0
mouth_size_v = (face_boundary[1][1] - nose_level)/2.0
mouth_size = min(mouth_size_v, mouth_size_h)
mouth_inset = ((face_boundary[1][0]-face_boundary[0][0])-mouth_size)/2
canvas.ellipse((face_boundary[0][0]+((face_boundary[1][0] - face_boundary[0][0])/3.0)-eye_size, eye_level-eye_size, face_boundary[0][0]+((face_boundary[1][0]-face_boundary[0][0])/3.0)+eye_size, eye_level + eye_size), None, outline=COLOR_FEATURES)
canvas.ellipse((face_boundary[0][0]+((face_boundary[1][0] - face_boundary[0][0])/3.0)*2-eye_size, eye_level-eye_size, face_boundary[0][0]+((face_boundary[1][0] - face_boundary[0][0])/3.0)*2+eye_size, eye_level+eye_size), None, outline=COLOR_FEATURES)
canvas.ellipse((face_boundary[0][0]+((face_boundary[1][0] - face_boundary[0][0])/2.0)-nose_size, nose_level-nose_size, face_boundary[0][0]+((face_boundary[1][0] - face_boundary[0][0])/2.0)+nose_size, nose_level+nose_size), outline=COLOR_FEATURES, fill=COLOR_FEATURES)
if sentiment_color == COLOR_GOOD:
canvas.chord(( face_boundary[0][0]+mouth_inset, nose_level, face_boundary[0][0]+mouth_inset+mouth_size, nose_level+mouth_size), 35, 135, fill=COLOR_FEATURES, outline=COLOR_FEATURES)
elif sentiment_color == COLOR_BAD:
canvas.chord(( face_boundary[0][0]+mouth_inset, face_boundary[1][1]-(face_boundary[1][1]-nose_level)*0.67, face_boundary[0][0]+mouth_inset+mouth_size, face_boundary[1][1]), 215, 335, fill=COLOR_FEATURES, outline=COLOR_FEATURES)
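# --- Illustrative sketch (not part of the original script; the helper name and face tuple are made up). ---
# Each detected face is replaced by a colored disc with simple features; the tuple layout
# matches findFacesDetails(): (sentiment, ((left, top), (right, bottom)), confidence, area).
def _example_obscure_face():
    im = Image.new("RGB", (200, 200))
    canvas = ImageDraw.Draw(im)
    happy_face = (1.0, ((50, 50), (150, 150)), 0.9, 100 * 100)
    obscureFacesWithSentiments(canvas, [happy_face])
    return im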
if __name__ == '__main__':
global STOP
STOP = False
log_stream = sys.stderr
log_queue = multiprocessing.Queue(100)
handler = ParentMultiProcessingLogHandler(logging.StreamHandler(log_stream), log_queue)
logging.getLogger('').addHandler(handler)
logging.getLogger('').setLevel(_DEBUG)
vision_results_queue = multiprocessing.Pipe()
vision_worker = ImageAnalyzer(vision_results_queue, log_queue, logging.getLogger('').getEffectiveLevel())
try:
logging.debug("Starting image analysis")
vision_worker.start()
unused, _ = vision_results_queue
unused.close()
watcher = threading.Thread(target = watchForResults, args=(vision_results_queue,))
watcher.start()
while not STOP:
time.sleep(POLL_SECS)
except Exception:
logging.exception("Main exception")
finally:
logging.debug("Ending")
vision_worker.stop()
vision_worker.join()
logging.debug("background process returned, exiting main process")
sys.exit(0)
| gpl-3.0 | 3,153,951,838,464,511,500 | 360,413,887,916,538,200 | 43.401806 | 275 | 0.618607 | false |
neilLasrado/erpnext | erpnext/accounts/doctype/journal_entry/test_journal_entry.py | 14 | 11145 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import unittest, frappe
from frappe.utils import flt, nowdate
from erpnext.accounts.doctype.account.test_account import get_inventory_account
from erpnext.exceptions import InvalidAccountCurrency
class TestJournalEntry(unittest.TestCase):
def test_journal_entry_with_against_jv(self):
jv_invoice = frappe.copy_doc(test_records[2])
base_jv = frappe.copy_doc(test_records[0])
self.jv_against_voucher_testcase(base_jv, jv_invoice)
def test_jv_against_sales_order(self):
from erpnext.selling.doctype.sales_order.test_sales_order import make_sales_order
sales_order = make_sales_order(do_not_save=True)
base_jv = frappe.copy_doc(test_records[0])
self.jv_against_voucher_testcase(base_jv, sales_order)
def test_jv_against_purchase_order(self):
from erpnext.buying.doctype.purchase_order.test_purchase_order import create_purchase_order
purchase_order = create_purchase_order(do_not_save=True)
base_jv = frappe.copy_doc(test_records[1])
self.jv_against_voucher_testcase(base_jv, purchase_order)
def jv_against_voucher_testcase(self, base_jv, test_voucher):
dr_or_cr = "credit" if test_voucher.doctype in ["Sales Order", "Journal Entry"] else "debit"
test_voucher.insert()
test_voucher.submit()
if test_voucher.doctype == "Journal Entry":
self.assertTrue(frappe.db.sql("""select name from `tabJournal Entry Account`
where account = %s and docstatus = 1 and parent = %s""",
("_Test Receivable - _TC", test_voucher.name)))
self.assertFalse(frappe.db.sql("""select name from `tabJournal Entry Account`
where reference_type = %s and reference_name = %s""", (test_voucher.doctype, test_voucher.name)))
base_jv.get("accounts")[0].is_advance = "Yes" if (test_voucher.doctype in ["Sales Order", "Purchase Order"]) else "No"
base_jv.get("accounts")[0].set("reference_type", test_voucher.doctype)
base_jv.get("accounts")[0].set("reference_name", test_voucher.name)
base_jv.insert()
base_jv.submit()
submitted_voucher = frappe.get_doc(test_voucher.doctype, test_voucher.name)
self.assertTrue(frappe.db.sql("""select name from `tabJournal Entry Account`
where reference_type = %s and reference_name = %s and {0}=400""".format(dr_or_cr),
(submitted_voucher.doctype, submitted_voucher.name)))
if base_jv.get("accounts")[0].is_advance == "Yes":
self.advance_paid_testcase(base_jv, submitted_voucher, dr_or_cr)
self.cancel_against_voucher_testcase(submitted_voucher)
def advance_paid_testcase(self, base_jv, test_voucher, dr_or_cr):
#Test advance paid field
advance_paid = frappe.db.sql("""select advance_paid from `tab%s`
where name=%s""" % (test_voucher.doctype, '%s'), (test_voucher.name))
payment_against_order = base_jv.get("accounts")[0].get(dr_or_cr)
self.assertTrue(flt(advance_paid[0][0]) == flt(payment_against_order))
def cancel_against_voucher_testcase(self, test_voucher):
if test_voucher.doctype == "Journal Entry":
# if test_voucher is a Journal Entry, test cancellation of test_voucher
test_voucher.cancel()
self.assertFalse(frappe.db.sql("""select name from `tabJournal Entry Account`
where reference_type='Journal Entry' and reference_name=%s""", test_voucher.name))
elif test_voucher.doctype in ["Sales Order", "Purchase Order"]:
# if test_voucher is a Sales Order/Purchase Order, test error on cancellation of test_voucher
submitted_voucher = frappe.get_doc(test_voucher.doctype, test_voucher.name)
self.assertRaises(frappe.LinkExistsError, submitted_voucher.cancel)
def test_jv_against_stock_account(self):
from erpnext.stock.doctype.purchase_receipt.test_purchase_receipt import set_perpetual_inventory
set_perpetual_inventory()
jv = frappe.copy_doc(test_records[0])
jv.get("accounts")[0].update({
"account": get_inventory_account('_Test Company'),
"company": "_Test Company",
"party_type": None,
"party": None
})
jv.insert()
from erpnext.accounts.general_ledger import StockAccountInvalidTransaction
self.assertRaises(StockAccountInvalidTransaction, jv.submit)
set_perpetual_inventory(0)
def test_multi_currency(self):
jv = make_journal_entry("_Test Bank USD - _TC",
"_Test Bank - _TC", 100, exchange_rate=50, save=False)
jv.get("accounts")[1].credit_in_account_currency = 5000
jv.submit()
gl_entries = frappe.db.sql("""select account, account_currency, debit, credit,
debit_in_account_currency, credit_in_account_currency
from `tabGL Entry` where voucher_type='Journal Entry' and voucher_no=%s
order by account asc""", jv.name, as_dict=1)
self.assertTrue(gl_entries)
expected_values = {
"_Test Bank USD - _TC": {
"account_currency": "USD",
"debit": 5000,
"debit_in_account_currency": 100,
"credit": 0,
"credit_in_account_currency": 0
},
"_Test Bank - _TC": {
"account_currency": "INR",
"debit": 0,
"debit_in_account_currency": 0,
"credit": 5000,
"credit_in_account_currency": 5000
}
}
for field in ("account_currency", "debit", "debit_in_account_currency", "credit", "credit_in_account_currency"):
for i, gle in enumerate(gl_entries):
self.assertEqual(expected_values[gle.account][field], gle[field])
# cancel
jv.cancel()
gle = frappe.db.sql("""select name from `tabGL Entry`
where voucher_type='Sales Invoice' and voucher_no=%s""", jv.name)
self.assertFalse(gle)
def test_disallow_change_in_account_currency_for_a_party(self):
# create jv in USD
jv = make_journal_entry("_Test Bank USD - _TC",
"_Test Receivable USD - _TC", 100, save=False)
jv.accounts[1].update({
"party_type": "Customer",
"party": "_Test Customer USD"
})
jv.submit()
# create jv in USD, but account currency in INR
jv = make_journal_entry("_Test Bank - _TC",
"_Test Receivable - _TC", 100, save=False)
jv.accounts[1].update({
"party_type": "Customer",
"party": "_Test Customer USD"
})
self.assertRaises(InvalidAccountCurrency, jv.submit)
# back in USD
jv = make_journal_entry("_Test Bank USD - _TC",
"_Test Receivable USD - _TC", 100, save=False)
jv.accounts[1].update({
"party_type": "Customer",
"party": "_Test Customer USD"
})
jv.submit()
def test_inter_company_jv(self):
frappe.db.set_value("Account", "Sales Expenses - _TC", "inter_company_account", 1)
frappe.db.set_value("Account", "Buildings - _TC", "inter_company_account", 1)
frappe.db.set_value("Account", "Sales Expenses - _TC1", "inter_company_account", 1)
frappe.db.set_value("Account", "Buildings - _TC1", "inter_company_account", 1)
jv = make_journal_entry("Sales Expenses - _TC", "Buildings - _TC", 100, posting_date=nowdate(), cost_center = "Main - _TC", save=False)
jv.voucher_type = "Inter Company Journal Entry"
jv.multi_currency = 0
jv.insert()
jv.submit()
jv1 = make_journal_entry("Sales Expenses - _TC1", "Buildings - _TC1", 100, posting_date=nowdate(), cost_center = "Main - _TC1", save=False)
jv1.inter_company_journal_entry_reference = jv.name
jv1.company = "_Test Company 1"
jv1.voucher_type = "Inter Company Journal Entry"
jv1.multi_currency = 0
jv1.insert()
jv1.submit()
jv.reload()
self.assertEqual(jv.inter_company_journal_entry_reference, jv1.name)
self.assertEqual(jv1.inter_company_journal_entry_reference, jv.name)
jv.cancel()
jv1.reload()
jv.reload()
self.assertEqual(jv.inter_company_journal_entry_reference, "")
self.assertEqual(jv1.inter_company_journal_entry_reference, "")
def test_jv_for_enable_allow_cost_center_in_entry_of_bs_account(self):
from erpnext.accounts.doctype.cost_center.test_cost_center import create_cost_center
accounts_settings = frappe.get_doc('Accounts Settings', 'Accounts Settings')
accounts_settings.allow_cost_center_in_entry_of_bs_account = 1
accounts_settings.save()
cost_center = "_Test Cost Center for BS Account - _TC"
create_cost_center(cost_center_name="_Test Cost Center for BS Account", company="_Test Company")
jv = make_journal_entry("_Test Cash - _TC", "_Test Bank - _TC", 100, cost_center = cost_center, save=False)
jv.voucher_type = "Bank Entry"
jv.multi_currency = 0
jv.cheque_no = "112233"
jv.cheque_date = nowdate()
jv.insert()
jv.submit()
expected_values = {
"_Test Cash - _TC": {
"cost_center": cost_center
},
"_Test Bank - _TC": {
"cost_center": cost_center
}
}
gl_entries = frappe.db.sql("""select account, cost_center, debit, credit
from `tabGL Entry` where voucher_type='Journal Entry' and voucher_no=%s
order by account asc""", jv.name, as_dict=1)
self.assertTrue(gl_entries)
for gle in gl_entries:
self.assertEqual(expected_values[gle.account]["cost_center"], gle.cost_center)
accounts_settings.allow_cost_center_in_entry_of_bs_account = 0
accounts_settings.save()
def test_jv_account_and_party_balance_for_enable_allow_cost_center_in_entry_of_bs_account(self):
from erpnext.accounts.doctype.cost_center.test_cost_center import create_cost_center
from erpnext.accounts.utils import get_balance_on
accounts_settings = frappe.get_doc('Accounts Settings', 'Accounts Settings')
accounts_settings.allow_cost_center_in_entry_of_bs_account = 1
accounts_settings.save()
cost_center = "_Test Cost Center for BS Account - _TC"
create_cost_center(cost_center_name="_Test Cost Center for BS Account", company="_Test Company")
jv = make_journal_entry("_Test Cash - _TC", "_Test Bank - _TC", 100, cost_center = cost_center, save=False)
account_balance = get_balance_on(account="_Test Bank - _TC", cost_center=cost_center)
jv.voucher_type = "Bank Entry"
jv.multi_currency = 0
jv.cheque_no = "112233"
jv.cheque_date = nowdate()
jv.insert()
jv.submit()
expected_account_balance = account_balance - 100
account_balance = get_balance_on(account="_Test Bank - _TC", cost_center=cost_center)
self.assertEqual(expected_account_balance, account_balance)
accounts_settings.allow_cost_center_in_entry_of_bs_account = 0
accounts_settings.save()
def make_journal_entry(account1, account2, amount, cost_center=None, posting_date=None, exchange_rate=1, save=True, submit=False, project=None):
if not cost_center:
cost_center = "_Test Cost Center - _TC"
jv = frappe.new_doc("Journal Entry")
jv.posting_date = posting_date or nowdate()
jv.company = "_Test Company"
jv.user_remark = "test"
jv.multi_currency = 1
jv.set("accounts", [
{
"account": account1,
"cost_center": cost_center,
"project": project,
"debit_in_account_currency": amount if amount > 0 else 0,
"credit_in_account_currency": abs(amount) if amount < 0 else 0,
"exchange_rate": exchange_rate
}, {
"account": account2,
"cost_center": cost_center,
"project": project,
"credit_in_account_currency": amount if amount > 0 else 0,
"debit_in_account_currency": abs(amount) if amount < 0 else 0,
"exchange_rate": exchange_rate
}
])
if save or submit:
jv.insert()
if submit:
jv.submit()
return jv
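# Illustrative sketch (not part of the original test suite): typical usage of the
# make_journal_entry() helper above. The account names and amount are assumptions
# reused from the tests in this file; a positive amount debits account1 and
# credits account2.
#
#     jv = make_journal_entry("_Test Cash - _TC", "_Test Bank - _TC", 100, save=False)
#     jv.voucher_type = "Bank Entry"
#     jv.insert()
#     jv.submit()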
test_records = frappe.get_test_records('Journal Entry')
| gpl-3.0 | 616,572,564,854,881,900 | -8,325,343,413,787,455,000 | 36.026578 | 144 | 0.702019 | false |
ATIX-AG/ansible | lib/ansible/vars/reserved.py | 40 | 2591 | # (c) 2017 Ansible By Red Hat
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.playbook import Play
from ansible.playbook.block import Block
from ansible.playbook.role import Role
from ansible.playbook.task import Task
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
def get_reserved_names(include_private=True):
''' this function returns the list of reserved names associated with play objects'''
public = set()
private = set()
result = set()
# FIXME: find a way to 'not hardcode', possibly need role deps/includes
class_list = [Play, Role, Block, Task]
for aclass in class_list:
aobj = aclass()
# build ordered list to loop over and dict with attributes
for attribute in aobj.__dict__['_attributes']:
if 'private' in attribute:
private.add(attribute)
else:
public.add(attribute)
# local_action is implicit with action
if 'action' in public:
public.add('local_action')
# loop implies with_
# FIXME: remove after with_ is not only deprecated but removed
if 'loop' in private or 'loop' in public:
public.add('with_')
if include_private:
result = public.union(private)
else:
result = public
return result
def warn_if_reserved(myvars):
''' this function warns if any variable passed conflicts with internally reserved names '''
varnames = set(myvars)
varnames.discard('vars') # we add this one internally, so safe to ignore
for varname in varnames.intersection(_RESERVED_NAMES):
display.warning('Found variable using reserved name: %s' % varname)
def is_reserved_name(name):
return name in _RESERVED_NAMES
_RESERVED_NAMES = frozenset(get_reserved_names())
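# Illustrative example (not part of the module): warn_if_reserved() accepts any
# mapping of variable names, so callers can check user-supplied vars before they
# are merged, e.g.
#
#     warn_if_reserved({'environment': 'prod', 'my_var': 1})
#
# which would log a warning for 'environment' because it is a play keyword,
# while is_reserved_name('my_var') would return False. The variable names used
# here are made up for the example.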
| gpl-3.0 | -1,778,185,650,982,532,600 | -3,453,147,682,760,264,700 | 29.845238 | 95 | 0.693169 | false |
CuonDeveloper/cuon | cuon_server/LoadBalancer/txlb/manager.py | 6 | 18051 | import os
import time
from datetime import datetime
from twisted.protocols import amp
from twisted.internet import protocol
from txlb import util
from txlb import model
from txlb import proxy
from txlb import config
from txlb import logging
from txlb import schedulers
class Error(Exception):
pass
class UnknownHostAndPortError(Exception):
"""
An operation was attempted that needed both host and port values to be
defined.
"""
class UnknowndServiceError(Error):
"""
An operation was invalid due to the fact that no service has been defined.
"""
def checkBadHosts(configuration, director):
"""
This function checks the director's hosts marked as "unavailable" and puts
them back into use.
"""
if not configuration.manager.hostCheckEnabled:
return
for name, service in director.getServices():
# since all proxies for a service share a tracker,
# we only need to check the first proxy.
group = service.getEnabledGroup()
tracker = director.getTracker(name, group.name)
badHosts = tracker.badhosts
for hostPort, timeAndError in badHosts.items():
when, what = badHosts[hostPort]
logging.log("re-adding %s automatically\n" % str(hostPort))
hostname = tracker.getHostNames()[hostPort]
del badHosts[hostPort]
tracker.newHost(hostPort, hostname)
def checkConfigChanges(configFile, configuration, director):
"""
This function replaces the current on-disk configuration with the
adjustments that have been made in-memory (likely from the admin web UI). A
backup of the original is made prior to replacement.
    Also, changes made on disk should have the ability to be re-read into
memory. Obviously there are all sorts of issues at play, here: race
conditions, differences and the need to merge, conflict resolution, etc.
"""
if not configuration.manager.configCheckEnabled:
return
# disable the admin UI or at the very least, make it read-only
director.setReadOnly()
# compare in-memory config with on-disk config
current = configuration.toXML()
disk = config.Config(configFile).toXML()
if current != disk:
print "Configurations are different; backing up and saving to disk ..."
# backup old file
backupFile = "%s-%s" % (
configFile, datetime.now().strftime('%Y%m%d%H%M%S'))
os.rename(configFile, backupFile)
# save configuration
fh = open(configFile, 'w+')
fh.write(current)
fh.close()
# re-enable admin UI
director.setReadWrite()
class GetClientAddress(amp.Command):
"""
Note: supplied by Apple.
"""
arguments = [('host', amp.String()),
('port', amp.Integer())]
response = [('host', amp.String()),
('port', amp.Integer())]
errors = {UnknownHostAndPortError: 'UNKNOWN_PORT'}
class ControlProtocol(amp.AMP):
"""
Note: supplied by Apple.
"""
def __init__(self, director):
self.director = director
def getClientAddress(self, host, port):
host, port = self.director.getClientAddress(host, port)
if (host, port) == (None, None):
raise UnknownHostAndPortError()
return {'host': host, 'port': port}
GetClientAddress.responder(getClientAddress)
class ControlFactory(protocol.ServerFactory):
"""
Note: supplied by Apple.
"""
def __init__(self, director):
self.director = director
def buildProtocol(self, addr):
return ControlProtocol(self.director)
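# Wiring sketch (an assumption, not from the original source): ControlFactory is a
# standard Twisted server factory, so the AMP control channel can be exposed with
# something like
#
#     from twisted.internet import reactor
#     reactor.listenTCP(7001, ControlFactory(director))
#
# where 7001 is an arbitrary example port and `director` is an instance of the
# ProxyManager class defined below.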
class ProxyManager(object):
"""
The purpose of this class is to start the load-balancer proxies for
enabled groups.
Note that this was formerly known as the Director, thus all the 'director'
variable names.
"""
def __init__(self, services=[]):
self.services = {}
if services:
for service in services:
self.addService(service)
self.proxies = {}
# XXX hopefully, the trackers attribute is temporary
self.trackers = {}
self._connections = {}
self.isReadOnly = False
def setReadOnly(self):
"""
Set the proxy manager to read-only; this is intended to be read by
other parts of the application (such as the admin interface)
whenever mutable state items are being manipulated. It doesn't lock
anything, it simply provides something that can be read.
"""
self.isReadOnly = True
def setReadWrite(self):
"""
Set the proxy manager to read-write.
"""
self.isReadOnly = False
def setServices(self, services):
"""
This method is for use when it is necessary to set a collection of
model.ProxyService objects at once.
"""
self.services = services
def getServices(self):
"""
Return the keys and values of the services attribute.
"""
return self.services.items()
def getFirstService(self):
"""
This is useful when load balancing a service via the API, something
that one only does with a single service.
"""
return self.getServices()[0]
def addService(self, service):
"""
This method adds a model.ProxyService instance to the proxy manager.
"""
self.services[service.name] = service
def getService(self, serviceName):
"""
model.ProxyService instances can be retrieved from the proxy manager by
a key look-up
"""
return self.services[serviceName]
def getGroups(self, serviceName):
"""
Get the keys and values for the groups in a given service.
"""
return self.getService(serviceName).getGroups()
def getGroup(self, serviceName, groupName):
"""
For a proxy service that has been added to the proxy manager,
model.ProxyGroup instances can be added to it.
"""
return self.getService(serviceName).getGroup(groupName)
def getHost(self, serviceName, groupName, hostName):
"""
model.ProxyHost instances can be added to the proxy manager, but they
need to be associated with a proxy service and a proxy group.
"""
return self.getGroup(serviceName, groupName).getHost(hostName)
def addTracker(self, serviceName, groupName, tracker):
"""
The tracker is the object that is responsible for recording the status
of connections, number of failures, number of open connections, etc. A
tracker that is added to the proxy manager needs to be associated with
a proxy service and a proxy group.
"""
self.trackers[(serviceName, groupName)] = tracker
def getTracker(self, serviceName, groupName):
"""
Trackers can be looked up by the keys that were used to add them: proxy
service and proxy group names.
"""
return self.trackers[(serviceName,groupName)]
def getScheduler(self, serviceName, groupName):
"""
The scheduler is the object responsible for determining which host will
accept the latest proxied request.
"""
return self.getGroup(serviceName, groupName).scheduler
def addProxy(self, serviceName, proxy):
"""
Add an already-created instance of proxy.Proxy to the manager's proxy
list.
"""
if not self.proxies.has_key(serviceName):
self.proxies[serviceName] = []
self.proxies[serviceName].append(proxy)
def createProxy(self, serviceName, host, port):
"""
Create a new Proxy and add it to the internal data structure. Note that
this is not a proxy model, but rather the proxy.Proxy object itself.
The parameters passed to Proxy will tell the load balancer on what
interface and port to listen for in-coming traffic.
"""
# proxies are associated with a specific tracker; trackers are
# associated with a specific service; proxies are also associated with
# a specific service, so there doesn't seem to be any need for an
# explicit association between proxies and trackers. The proxy can
# access the pm, which can get the tracker it needs.
p = proxy.Proxy(serviceName, host, port, self)
self.addProxy(serviceName, p)
def updateProxy(self, serviceName, index, newProxy):
"""
Sometimes (as in the case of changing the port on which the proxy is
listening) we need to update the proxy. This method allows one to do
this by specifically identifying the proxy.
"""
self.proxies[serviceName][index] = newProxy
def getProxies(self):
"""
Return the keys and values for the proxies attribute. The proxies
attribute on the proxy manager stores a dictionary of proxy.Proxy
instances.
"""
return self.proxies.items()
def getProxy(self, serviceName, index=None):
"""
A Proxy instance can be retrieved by the service name and (since there
can be more than one port listening per service) index.
"""
proxies = self.proxies[serviceName]
if index == None:
return proxies
return proxies[index]
def addHost(self, serviceName, groupName, proxiedName, ip, weight=1):
"""
This method updates not only the tracker data, but the models as well.
"""
tracker = self.getTracker(serviceName=serviceName, groupName=groupName)
# XXX does the tracker need to know about weights?
tracker.newHost(name=proxiedName, ip=ip)
# add modeling information
host, port = util.splitHostPort(ip)
proxiedHost = model.ProxyHost(proxiedName, host, port, weight)
self.getGroup(serviceName, groupName).addHost(proxiedHost)
def delHost(self, serviceName, groupName, proxiedName, ip):
"""
This method updates not only the tracker data, but the models as well.
"""
tracker = self.getTracker(serviceName=serviceName, groupName=groupName)
tracker.delHost(name=proxiedName, ip=ip)
# remove from modeling information, too
self.getGroup(serviceName, groupName).delHost(proxiedName)
def switchGroup(self, serviceName, oldGroupName, newGroupName):
"""
This method needs to update the two affected proxy group models and
setup the new tracker.
"""
oldGroup = self.getService(serviceName).getGroup(oldGroupName)
oldGroup.disable()
newGroup = self.getService(serviceName).getGroup(newGroupName)
newGroup.enable()
for proxy in self.getProxy(serviceName):
proxy.setTracker(newGroupName)
def getClientAddress(self, host, port):
"""
"""
return self._connections.get((host, port), (None, None))
def setClientAddress(self, host, peer):
"""
"""
self._connections[host] = peer
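# Sketch of the client-address bookkeeping above (the addresses are made up): the
# proxy records each accepted connection so the AMP ControlProtocol can map a
# proxy-side endpoint back to the originating client, e.g.
#
#     pm.setClientAddress(('192.0.2.10', 52311), ('198.51.100.7', 40001))
#     pm.getClientAddress('192.0.2.10', 52311)   # -> ('198.51.100.7', 40001)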
def proxyManagerFactory(services):
"""
This factory is for simplifying the common task of creating a proxy manager
with presets for many attributes and/or much data.
"""
# check to see what got passed, in case we need to convert it
if isinstance(services[0], model.HostMapper):
services = model.convertMapperToModel(services)
# create the manager
pm = ProxyManager(services)
for serviceName, service in pm.getServices():
# set up the trackers for each group
for groupName, group in pm.getGroups(serviceName):
tracker = HostTracking(group)
scheduler = schedulers.schedulerFactory(group.lbType, tracker)
pm.addTracker(serviceName, groupName, tracker)
# now let's setup actual proxies for the hosts in the enabled group
group = service.getEnabledGroup()
# XXX maybe won't need this next line
#enabledTracker = pm.getTracker(service.name, group.name)
for host, port in service.addresses:
pm.createProxy(serviceName, host, port)
# return proxy manager
return pm
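# Minimal usage sketch (an assumption): `services` is a list of model.ProxyService
# objects (or model.HostMapper entries, which are converted above); the factory
# wires up trackers, schedulers and proxies in one call:
#
#     pm = proxyManagerFactory(services)
#     for serviceName, service in pm.getServices():
#         print serviceName, service.getEnabledGroup().name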
class HostTracking(object):
"""
This class is responsible for tracking proxied host metadata (such as
connection information and failure counts).
Schedulers are responsible for selecting the next proxied host that will
receive the client request. Schedulers depend upon their related
trackers (instances of this class) for connection information.
"""
def __init__(self, proxyGroup):
self.group = proxyGroup
self.hosts = []
self.hostnames = {}
self.badhosts = {}
self.openconns = {}
# the values in self.available indicate the number of connections that
# are currently being attempted; a down host is not in available
self.available = {}
self.failed = {}
self.totalconns = {}
self.lastclose = {}
# this next attribute gets set when a Scheduler is initiated; this class
# needs the scheduler attribute for nextHost calls
self.scheduler = None
self.initializeGroupHosts()
def initializeGroupHosts(self):
for hostName, host in self.group.getHosts():
self.newHost((host.hostname, host.port), hostName)
def getStats(self):
def sorter(attr):
sorts = {}
data = getattr(self, attr)
hostPortCounts = data.items()
hostPortCounts.sort()
for hostPort, count in hostPortCounts:
sorts['%s:%s' % hostPort] = count
return sorts
stats = {}
# we don't present open connections for hosts that aren't available
stats['openconns'] = sorter('available')
stats['totals'] = sorter('totalconns')
stats['failed'] = sorter('failed')
stats['bad'] = self.badhosts
return stats
def showStats(self, verbose=1):
stats = []
stats.append("%d open connections" % len(self.openconns.keys()))
hostPortCounts = self.available.items()
hostPortCounts.sort()
stats = stats + [str(x) for x in hostPortCounts]
if verbose:
openHosts = [x[1] for x in self.openconns.values()]
openHosts.sort()
stats = stats + [str(x) for x in openHosts]
return "\n".join(stats)
def getHost(self, senderFactory, client_addr=None):
host = self.scheduler.nextHost(client_addr)
if not host:
return None
cur = self.available.get(host)
self.openconns[senderFactory] = (time.time(), host)
self.available[host] += 1
return host
def getHostNames(self):
return self.hostnames
def doneHost(self, senderFactory):
try:
t, host = self.openconns[senderFactory]
except KeyError:
return
del self.openconns[senderFactory]
if self.available.get(host) is not None:
self.available[host] -= 1
self.totalconns[host] += 1
self.lastclose[host] = time.time()
def newHost(self, ip, name):
if type(ip) is not type(()):
ip = util.splitHostPort(ip)
self.hosts.append(ip)
self.hostnames[ip] = name
# XXX why is this needed too?
self.hostnames['%s:%d' % ip] = name
self.available[ip] = 0
self.totalconns[ip] = 0
def delHost(self, ip=None, name=None, activegroup=0):
"""
remove a host
"""
if ip is not None:
if type(ip) is not type(()):
ip = util.splitHostPort(ip)
elif name is not None:
            for ip in self.hostnames.keys():
                if self.hostnames[ip] == name:
                    break
            else:
                # only raise once the loop has finished without a match
                raise ValueError, "No host named %s" % (name)
else:
raise ValueError, "Neither ip nor name supplied"
if activegroup and len(self.hosts) == 1:
return 0
if ip in self.hosts:
self.hosts.remove(ip)
del self.hostnames[ip]
del self.available[ip]
if self.failed.has_key(ip):
del self.failed[ip]
del self.totalconns[ip]
elif self.badhosts.has_key(ip):
del self.badhosts[ip]
else:
raise ValueError, "Couldn't find host"
return 1
def deadHost(self, senderFactory, reason='', doLog=True):
"""
This method gets called when a proxied host is unreachable.
"""
# if this throws an exception here, I think it's because all the hosts
# have been removed from the pool
try:
epochTime, hostPort = self.openconns[senderFactory]
except KeyError:
if doLog:
msg = """Wow, Bender says "We're boned." No hosts available.\n"""
logging.log(msg)
return
if not self.failed.has_key(hostPort):
self.failed[hostPort] = 1
else:
self.failed[hostPort] += 1
if hostPort in self.hosts:
if doLog:
logging.log("marking host %s down (%s)\n" % (
str(hostPort), reason.getErrorMessage()))
self.hosts.remove(hostPort)
if self.available.has_key(hostPort):
del self.available[hostPort]
# XXX I don't think we want to delete the previously gathered stats for
# the hosts that go bad... I'll keep this code here (but commented out)
# in case there's a good reason for it and I'm just not thinking of it
# right now
#if self.totalconns.has_key(hostPort):
# del self.totalconns[hostPort]
self.badhosts[hostPort] = (time.time(), reason)
# make sure we also mark this session as done.
self.doneHost(senderFactory)
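# Illustrative life cycle of a tracker (an assumption; the host address and name
# are made up, `senderFactory` is any hashable object identifying a client
# connection, and a scheduler must be attached before getHost() is called).
# Trackers are normally created by proxyManagerFactory above rather than by hand:
#
#     tracker = HostTracking(group)                 # group: model.ProxyGroup
#     tracker.newHost(('10.0.0.1', 8001), 'web1')
#     host = tracker.getHost(senderFactory)         # pick a host for a request
#     tracker.doneHost(senderFactory)               # book-keeping on close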
| gpl-3.0 | 5,282,726,971,384,443,000 | 515,725,802,699,472,000 | 30.892226 | 81 | 0.62052 | false |
rcbops/python-django-buildpackage | django/contrib/comments/__init__.py | 423 | 3333 | from django.conf import settings
from django.core import urlresolvers
from django.core.exceptions import ImproperlyConfigured
from django.contrib.comments.models import Comment
from django.contrib.comments.forms import CommentForm
from django.utils.importlib import import_module
DEFAULT_COMMENTS_APP = 'django.contrib.comments'
def get_comment_app():
"""
Get the comment app (i.e. "django.contrib.comments") as defined in the settings
"""
# Make sure the app's in INSTALLED_APPS
comments_app = get_comment_app_name()
if comments_app not in settings.INSTALLED_APPS:
raise ImproperlyConfigured("The COMMENTS_APP (%r) "\
"must be in INSTALLED_APPS" % settings.COMMENTS_APP)
# Try to import the package
try:
package = import_module(comments_app)
except ImportError:
raise ImproperlyConfigured("The COMMENTS_APP setting refers to "\
"a non-existing package.")
return package
def get_comment_app_name():
"""
Returns the name of the comment app (either the setting value, if it
exists, or the default).
"""
return getattr(settings, 'COMMENTS_APP', DEFAULT_COMMENTS_APP)
def get_model():
"""
Returns the comment model class.
"""
if get_comment_app_name() != DEFAULT_COMMENTS_APP and hasattr(get_comment_app(), "get_model"):
return get_comment_app().get_model()
else:
return Comment
def get_form():
"""
Returns the comment ModelForm class.
"""
if get_comment_app_name() != DEFAULT_COMMENTS_APP and hasattr(get_comment_app(), "get_form"):
return get_comment_app().get_form()
else:
return CommentForm
def get_form_target():
"""
Returns the target URL for the comment form submission view.
"""
if get_comment_app_name() != DEFAULT_COMMENTS_APP and hasattr(get_comment_app(), "get_form_target"):
return get_comment_app().get_form_target()
else:
return urlresolvers.reverse("django.contrib.comments.views.comments.post_comment")
def get_flag_url(comment):
"""
Get the URL for the "flag this comment" view.
"""
if get_comment_app_name() != DEFAULT_COMMENTS_APP and hasattr(get_comment_app(), "get_flag_url"):
return get_comment_app().get_flag_url(comment)
else:
return urlresolvers.reverse("django.contrib.comments.views.moderation.flag",
args=(comment.id,))
def get_delete_url(comment):
"""
Get the URL for the "delete this comment" view.
"""
if get_comment_app_name() != DEFAULT_COMMENTS_APP and hasattr(get_comment_app(), "get_delete_url"):
return get_comment_app().get_delete_url(comment)
else:
return urlresolvers.reverse("django.contrib.comments.views.moderation.delete",
args=(comment.id,))
def get_approve_url(comment):
"""
Get the URL for the "approve this comment from moderation" view.
"""
if get_comment_app_name() != DEFAULT_COMMENTS_APP and hasattr(get_comment_app(), "get_approve_url"):
return get_comment_app().get_approve_url(comment)
else:
return urlresolvers.reverse("django.contrib.comments.views.moderation.approve",
args=(comment.id,))
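# Illustrative usage sketch (not part of this module): calling code is expected
# to go through these helpers so a project can swap in a custom comments app via
# the COMMENTS_APP setting. The names `entry` and `some_comment` below are
# placeholders for the example:
#
#     CommentModel = get_model()
#     form = get_form()(entry)          # `entry` is the object being commented on
#     post_url = get_form_target()
#     flag_url = get_flag_url(some_comment)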
| bsd-3-clause | 848,273,189,875,241,300 | -1,228,349,162,470,176,800 | 35.626374 | 104 | 0.645065 | false |