repo_name | path | copies | size | content | license
---|---|---|---|---|---|
tempbottle/ironpython3 | Src/StdLib/Lib/test/test_univnewlines.py | 130 | 3922 | # Tests universal newline support for both reading and parsing files.
import io
import _pyio as pyio
import unittest
import os
import sys
from test import support
if not hasattr(sys.stdin, 'newlines'):
raise unittest.SkipTest(
"This Python does not have universal newline support")
FATX = 'x' * (2**14)
DATA_TEMPLATE = [
"line1=1",
"line2='this is a very long line designed to go past any default " +
"buffer limits that exist in io.py but we also want to test " +
"the uncommon case, naturally.'",
"def line3():pass",
"line4 = '%s'" % FATX,
]
DATA_LF = "\n".join(DATA_TEMPLATE) + "\n"
DATA_CR = "\r".join(DATA_TEMPLATE) + "\r"
DATA_CRLF = "\r\n".join(DATA_TEMPLATE) + "\r\n"
# Note that DATA_MIXED also tests the ability to recognize a lone \r
# before end-of-file.
DATA_MIXED = "\n".join(DATA_TEMPLATE) + "\r"
DATA_SPLIT = [x + "\n" for x in DATA_TEMPLATE]
class CTest:
open = io.open
class PyTest:
open = staticmethod(pyio.open)
class TestGenericUnivNewlines:
# use a class variable DATA to define the data to write to the file
# and a class variable NEWLINE to set the expected newlines value
READMODE = 'r'
WRITEMODE = 'wb'
def setUp(self):
data = self.DATA
if "b" in self.WRITEMODE:
data = data.encode("ascii")
with self.open(support.TESTFN, self.WRITEMODE) as fp:
fp.write(data)
def tearDown(self):
try:
os.unlink(support.TESTFN)
except:
pass
def test_read(self):
with self.open(support.TESTFN, self.READMODE) as fp:
data = fp.read()
self.assertEqual(data, DATA_LF)
self.assertEqual(repr(fp.newlines), repr(self.NEWLINE))
def test_readlines(self):
with self.open(support.TESTFN, self.READMODE) as fp:
data = fp.readlines()
self.assertEqual(data, DATA_SPLIT)
self.assertEqual(repr(fp.newlines), repr(self.NEWLINE))
def test_readline(self):
with self.open(support.TESTFN, self.READMODE) as fp:
data = []
d = fp.readline()
while d:
data.append(d)
d = fp.readline()
self.assertEqual(data, DATA_SPLIT)
self.assertEqual(repr(fp.newlines), repr(self.NEWLINE))
def test_seek(self):
with self.open(support.TESTFN, self.READMODE) as fp:
fp.readline()
pos = fp.tell()
data = fp.readlines()
self.assertEqual(data, DATA_SPLIT[1:])
fp.seek(pos)
data = fp.readlines()
self.assertEqual(data, DATA_SPLIT[1:])
class TestCRNewlines(TestGenericUnivNewlines):
NEWLINE = '\r'
DATA = DATA_CR
class CTestCRNewlines(CTest, TestCRNewlines, unittest.TestCase): pass
class PyTestCRNewlines(PyTest, TestCRNewlines, unittest.TestCase): pass
class TestLFNewlines(TestGenericUnivNewlines):
NEWLINE = '\n'
DATA = DATA_LF
class CTestLFNewlines(CTest, TestLFNewlines, unittest.TestCase): pass
class PyTestLFNewlines(PyTest, TestLFNewlines, unittest.TestCase): pass
class TestCRLFNewlines(TestGenericUnivNewlines):
NEWLINE = '\r\n'
DATA = DATA_CRLF
def test_tell(self):
with self.open(support.TESTFN, self.READMODE) as fp:
self.assertEqual(repr(fp.newlines), repr(None))
data = fp.readline()
pos = fp.tell()
self.assertEqual(repr(fp.newlines), repr(self.NEWLINE))
class CTestCRLFNewlines(CTest, TestCRLFNewlines, unittest.TestCase): pass
class PyTestCRLFNewlines(PyTest, TestCRLFNewlines, unittest.TestCase): pass
class TestMixedNewlines(TestGenericUnivNewlines):
NEWLINE = ('\r', '\n')
DATA = DATA_MIXED
class CTestMixedNewlines(CTest, TestMixedNewlines, unittest.TestCase): pass
class PyTestMixedNewlines(PyTest, TestMixedNewlines, unittest.TestCase): pass
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
elysium001/zamboni | scripts/gaia_package.py | 18 | 1605 | """
Gaia Marketplace helper. Takes packaged manifest as only argument.
Copy this into the Marketplace app folder (dev/stage/prod whatever), and run.
Fetches Marketplace package and Etags (e.g., m.f.c/packaged.webapp).
Downloads application.zip for you.
If metadata.json is in path, replaces the appropriate fields.
>> python gaia_package.py https://marketplace.firefox.com/packaged.webapp
And you're done!
"""
import json
import os
import requests
import sys
try:
manifest_url = sys.argv[1]
if not manifest_url.startswith('http'):
raise
except:
print "Please give a valid manifest (e.g., m.f.c/packaged.webapp)."
sys.exit(0)
r = requests.get(manifest_url)
package_path = json.loads(r.content)['package_path']
etag = r.headers['etag'].replace('"', '')
print "Downloading package"
r = requests.get(package_path)
package = r.content
package_etag = r.headers['etag'].replace('"', '')
f = open('application.zip', 'wb')
f.write(package)
f.close()
print "Package path: %s" % package_path
print "Etag: %s" % etag
print "Package Etag: %s" % package_etag
filename = 'metadata.json'
try:
f = open(filename, 'r')
except:
sys.exit(0)
print "Updating metadata.json"
tmp_filename = 'metadata.json.tmp'
tmp_f = open(tmp_filename, 'w')
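# Note (added comment): the patching below relies on fixed character offsets
# (line[0:13] for "etag", line[0:20] for "packageEtag"), so it assumes the
# metadata.json lines are formatted exactly as Gaia writes them.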
for line in f:
if '"etag"' in line:
line = r'%s%s%s' % (line[0:13], etag, line[-5:])
line = line.replace(r'\\', r'\\\\')
elif '"packageEtag"' in line:
line = r'%s%s%s' % (line[0:20], package_etag, line[-5:])
line = line.replace(r'\\', r'\\\\')
tmp_f.write(line)
tmp_f.close()
os.rename(tmp_filename, filename)
| bsd-3-clause |
francisco-dlp/hyperspy | hyperspy/drawing/utils.py | 1 | 57321 | # -*- coding: utf-8 -*-
# Copyright 2007-2016 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import copy
import itertools
import textwrap
from traits import trait_base
import matplotlib.pyplot as plt
import matplotlib as mpl
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.backend_bases import key_press_handler
import warnings
import numpy as np
from distutils.version import LooseVersion
import logging
import hyperspy as hs
_logger = logging.getLogger(__name__)
def contrast_stretching(data, saturated_pixels):
"""Calculate bounds that leaves out a given percentage of the data.
Parameters
----------
data: numpy array
saturated_pixels: scalar, None
The percentage of pixels that are left out of the bounds. For example,
the low and high bounds of a value of 1 are the 0.5% and 99.5%
percentiles. It must be in the [0, 100] range. If None, set the value
to 0.
Returns
-------
vmin, vmax: scalar
The low and high bounds
Raises
------
ValueError if the value of `saturated_pixels` is out of the valid range.
"""
# Sanity check
if saturated_pixels is None:
saturated_pixels = 0
if not 0 <= saturated_pixels <= 100:
raise ValueError(
"saturated_pixels must be a scalar in the range[0, 100]")
vmin = np.nanpercentile(data, saturated_pixels / 2.)
vmax = np.nanpercentile(data, 100 - saturated_pixels / 2.)
return vmin, vmax
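# Illustrative example (added; not part of the original module): with
# saturated_pixels=10 the bounds are the 5th and 95th percentiles, e.g.
#   contrast_stretching(np.arange(101), 10)  # -> (5.0, 95.0)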
MPL_DIVERGING_COLORMAPS = [
"BrBG",
"bwr",
"coolwarm",
"PiYG",
"PRGn",
"PuOr",
"RdBu",
"RdGy",
"RdYIBu",
"RdYIGn",
"seismic",
"Spectral", ]
# Add reversed colormaps
MPL_DIVERGING_COLORMAPS += [cmap + "_r" for cmap in MPL_DIVERGING_COLORMAPS]
def centre_colormap_values(vmin, vmax):
"""Calculate vmin and vmax to set the colormap midpoint to zero.
Parameters
----------
vmin, vmax : scalar
The range of data to display.
Returns
-------
cvmin, cvmax : scalar
The values to obtain a centre colormap.
"""
absmax = max(abs(vmin), abs(vmax))
return -absmax, absmax
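# Illustrative example (added; not part of the original module):
#   centre_colormap_values(-2, 5)  # -> (-5, 5), so zero sits at the cmap centre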
def create_figure(window_title=None,
_on_figure_window_close=None,
disable_xyscale_keys=False,
**kwargs):
"""Create a matplotlib figure.
This function adds the possibility to execute another function
when the figure is closed and to easily set the window title. Any
keyword argument is passed to the plt.figure function
Parameters
----------
window_title : string
_on_figure_window_close : function
disable_xyscale_keys : bool, disable the `k`, `l` and `L` shortcuts which
toggle the x or y axis between linear and log scale.
Returns
-------
fig : plt.figure
"""
fig = plt.figure(**kwargs)
if window_title is not None:
# remove non-alphanumeric characters to prevent file saving problems
# This is a workaround for:
# https://github.com/matplotlib/matplotlib/issues/9056
reserved_characters = r'<>"/\|?*'
for c in reserved_characters:
window_title = window_title.replace(c, '')
window_title = window_title.replace('\n', ' ')
window_title = window_title.replace(':', ' -')
fig.canvas.set_window_title(window_title)
if disable_xyscale_keys and hasattr(fig.canvas, 'toolbar'):
# hack the `key_press_handler` to disable the `k`, `l`, `L` shortcuts
manager = fig.canvas.manager
fig.canvas.mpl_disconnect(manager.key_press_handler_id)
manager.key_press_handler_id = manager.canvas.mpl_connect(
'key_press_event',
lambda event: key_press_handler_custom(event, manager.canvas))
if _on_figure_window_close is not None:
on_figure_window_close(fig, _on_figure_window_close)
return fig
def key_press_handler_custom(event, canvas):
if event.key not in ['k', 'l', 'L']:
key_press_handler(event, canvas, canvas.manager.toolbar)
def on_figure_window_close(figure, function):
"""Connects a close figure signal to a given function.
Parameters
----------
figure : mpl figure instance
function : function
"""
def function_wrapper(evt):
function()
figure.canvas.mpl_connect('close_event', function_wrapper)
def plot_RGB_map(im_list, normalization='single', dont_plot=False):
"""Plot 2 or 3 maps in RGB.
Parameters
----------
im_list : list of Signal2D instances
normalization : {'single', 'global'}
dont_plot : bool
Returns
-------
array: RGB matrix
"""
# from widgets import cursors
height, width = im_list[0].data.shape[:2]
rgb = np.zeros((height, width, 3))
rgb[:, :, 0] = im_list[0].data.squeeze()
rgb[:, :, 1] = im_list[1].data.squeeze()
if len(im_list) == 3:
rgb[:, :, 2] = im_list[2].data.squeeze()
if normalization == 'single':
for i in range(len(im_list)):
rgb[:, :, i] /= rgb[:, :, i].max()
elif normalization == 'global':
rgb /= rgb.max()
rgb = rgb.clip(0, rgb.max())
if not dont_plot:
figure = plt.figure()
ax = figure.add_subplot(111)
ax.frameon = False
ax.set_axis_off()
ax.imshow(rgb, interpolation='nearest')
# cursors.set_mpl_ax(ax)
figure.canvas.draw_idle()
else:
return rgb
def subplot_parameters(fig):
"""Returns a list of the subplot parameters of a mpl figure.
Parameters
----------
fig : mpl figure
Returns
-------
tuple : (left, bottom, right, top, wspace, hspace)
"""
wspace = fig.subplotpars.wspace
hspace = fig.subplotpars.hspace
left = fig.subplotpars.left
right = fig.subplotpars.right
top = fig.subplotpars.top
bottom = fig.subplotpars.bottom
return left, bottom, right, top, wspace, hspace
class ColorCycle:
_color_cycle = [mpl.colors.colorConverter.to_rgba(color) for color
in ('b', 'g', 'r', 'c', 'm', 'y', 'k')]
def __init__(self):
self.color_cycle = copy.copy(self._color_cycle)
def __call__(self):
if not self.color_cycle:
self.color_cycle = copy.copy(self._color_cycle)
return self.color_cycle.pop(0)
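# Illustrative example (added; not part of the original module): the cycle
# restarts automatically once all seven colours have been used.
#   cc = ColorCycle()
#   [cc() for _ in range(8)]  # the 8th call returns the RGBA for 'b' again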
def plot_signals(signal_list, sync=True, navigator="auto",
navigator_list=None, **kwargs):
"""Plot several signals at the same time.
Parameters
----------
signal_list : list of BaseSignal instances
If sync is set to True, the signals must have the
same navigation shape, but not necessarily the same signal shape.
sync : True or False, default "True"
If True: the signals will share navigation, all the signals
must have the same navigation shape for this to work, but not
necessarily the same signal shape.
navigator : {"auto", None, "spectrum", "slider", BaseSignal}, default "auto"
See signal.plot docstring for full description
navigator_list : {List of navigator arguments, None}, default None
Set different navigator options for the signals. Must use valid
navigator arguments: "auto", None, "spectrum", "slider", or a
hyperspy Signal. The list must have the same size as signal_list.
If None, the argument specified in navigator will be used.
**kwargs
Any extra keyword arguments are passed to each signal `plot` method.
Example
-------
>>> s_cl = hs.load("coreloss.dm3")
>>> s_ll = hs.load("lowloss.dm3")
>>> hs.plot.plot_signals([s_cl, s_ll])
Specifying the navigator:
>>> s_cl = hs.load("coreloss.dm3")
>>> s_ll = hs.load("lowloss.dm3")
>>> hs.plot.plot_signals([s_cl, s_ll], navigator="slider")
Specifying the navigator for each signal:
>>> s_cl = hs.load("coreloss.dm3")
>>> s_ll = hs.load("lowloss.dm3")
>>> s_edx = hs.load("edx.dm3")
>>> s_adf = hs.load("adf.dm3")
>>> hs.plot.plot_signals(
[s_cl, s_ll, s_edx], navigator_list=["slider",None,s_adf])
"""
import hyperspy.signal
if navigator_list:
if not (len(signal_list) == len(navigator_list)):
raise ValueError(
"signal_list and navigator_list must"
" have the same size")
if sync:
axes_manager_list = []
for signal in signal_list:
axes_manager_list.append(signal.axes_manager)
if not navigator_list:
navigator_list = []
if navigator is None:
navigator_list.extend([None] * len(signal_list))
elif isinstance(navigator, hyperspy.signal.BaseSignal):
navigator_list.append(navigator)
navigator_list.extend([None] * (len(signal_list) - 1))
elif navigator == "slider":
navigator_list.append("slider")
navigator_list.extend([None] * (len(signal_list) - 1))
elif navigator == "spectrum":
navigator_list.extend(["spectrum"] * len(signal_list))
elif navigator == "auto":
navigator_list.extend(["auto"] * len(signal_list))
else:
raise ValueError(
"navigator must be one of \"spectrum\",\"auto\","
" \"slider\", None, a Signal instance")
# Check to see if the spectra have the same navigational shapes
temp_shape_first = axes_manager_list[0].navigation_shape
for i, axes_manager in enumerate(axes_manager_list):
temp_shape = axes_manager.navigation_shape
if not (temp_shape_first == temp_shape):
raise ValueError(
"The spectra does not have the same navigation shape")
axes_manager_list[i] = axes_manager.deepcopy()
if i > 0:
for axis0, axisn in zip(axes_manager_list[0].navigation_axes,
axes_manager_list[i].navigation_axes):
axes_manager_list[i]._axes[axisn.index_in_array] = axis0
del axes_manager
for signal, navigator, axes_manager in zip(signal_list,
navigator_list,
axes_manager_list):
signal.plot(axes_manager=axes_manager,
navigator=navigator,
**kwargs)
# If sync is False
else:
if not navigator_list:
navigator_list = []
navigator_list.extend([navigator] * len(signal_list))
for signal, navigator in zip(signal_list, navigator_list):
signal.plot(navigator=navigator,
**kwargs)
def _make_heatmap_subplot(spectra):
from hyperspy._signals.signal2d import Signal2D
im = Signal2D(spectra.data, axes=spectra.axes_manager._get_axes_dicts())
im.metadata.General.title = spectra.metadata.General.title
im.plot()
return im._plot.signal_plot.ax
def set_xaxis_lims(mpl_ax, hs_axis):
"""
Set the matplotlib axis limits to match that of a HyperSpy axis
Parameters
----------
mpl_ax : :class:`matplotlib.axis.Axis`
The ``matplotlib`` axis to change
hs_axis : :class:`~hyperspy.axes.DataAxis`
The data axis that contains the values that control the scaling
"""
x_axis_lower_lim = hs_axis.axis[0]
x_axis_upper_lim = hs_axis.axis[-1]
mpl_ax.set_xlim(x_axis_lower_lim, x_axis_upper_lim)
def _make_overlap_plot(spectra, ax, color="blue", line_style='-'):
if isinstance(color, str):
color = [color] * len(spectra)
if isinstance(line_style, str):
line_style = [line_style] * len(spectra)
for spectrum_index, (spectrum, color, line_style) in enumerate(
zip(spectra, color, line_style)):
x_axis = spectrum.axes_manager.signal_axes[0]
spectrum = _transpose_if_required(spectrum, 1)
ax.plot(x_axis.axis, spectrum.data, color=color, ls=line_style)
set_xaxis_lims(ax, x_axis)
_set_spectrum_xlabel(spectra if isinstance(spectra, hs.signals.BaseSignal)
else spectra[-1], ax)
ax.set_ylabel('Intensity')
ax.autoscale(tight=True)
def _make_cascade_subplot(
spectra, ax, color="blue", line_style='-', padding=1):
max_value = 0
for spectrum in spectra:
spectrum_yrange = (np.nanmax(spectrum.data) -
np.nanmin(spectrum.data))
if spectrum_yrange > max_value:
max_value = spectrum_yrange
if isinstance(color, str):
color = [color] * len(spectra)
if isinstance(line_style, str):
line_style = [line_style] * len(spectra)
for spectrum_index, (spectrum, color, line_style) in enumerate(
zip(spectra, color, line_style)):
x_axis = spectrum.axes_manager.signal_axes[0]
spectrum = _transpose_if_required(spectrum, 1)
data_to_plot = ((spectrum.data - spectrum.data.min()) /
float(max_value) + spectrum_index * padding)
ax.plot(x_axis.axis, data_to_plot, color=color, ls=line_style)
set_xaxis_lims(ax, x_axis)
_set_spectrum_xlabel(spectra if isinstance(spectra, hs.signals.BaseSignal)
else spectra[-1], ax)
ax.set_yticks([])
ax.autoscale(tight=True)
def _plot_spectrum(spectrum, ax, color="blue", line_style='-'):
x_axis = spectrum.axes_manager.signal_axes[0]
ax.plot(x_axis.axis, spectrum.data, color=color, ls=line_style)
set_xaxis_lims(ax, x_axis)
def _set_spectrum_xlabel(spectrum, ax):
x_axis = spectrum.axes_manager.signal_axes[0]
ax.set_xlabel("%s (%s)" % (x_axis.name, x_axis.units))
def _transpose_if_required(signal, expected_dimension):
# EDS profiles or maps have signal dimension = 0 and navigation dimension
# 1 or 2. For convenience transpose the signal if possible
if (signal.axes_manager.signal_dimension == 0 and
signal.axes_manager.navigation_dimension == expected_dimension):
return signal.T
else:
return signal
def plot_images(images,
cmap=None,
no_nans=False,
per_row=3,
label='auto',
labelwrap=30,
suptitle=None,
suptitle_fontsize=18,
colorbar='multi',
centre_colormap="auto",
saturated_pixels=0,
scalebar=None,
scalebar_color='white',
axes_decor='all',
padding=None,
tight_layout=False,
aspect='auto',
min_asp=0.1,
namefrac_thresh=0.4,
fig=None,
vmin=None,
vmax=None,
*args,
**kwargs):
"""Plot multiple images as sub-images in one figure.
Extra keyword arguments are passed to `matplotlib.figure`.
Parameters
----------
images : list of Signal2D or BaseSignal
`images` should be a list of Signals to plot. For `BaseSignal` with
navigation dimensions 2 and signal dimension 0, the signal will be
transposed to form a `Signal2D`.
Multi-dimensional images will have each plane plotted as a separate
image.
If any signal shape is not suitable, a ValueError will be raised.
cmap : matplotlib colormap, list, or ``'mpl_colors'``, *optional*
The colormap used for the images, by default read from ``pyplot``.
A list of colormaps can also be provided, and the images will
cycle through them. Optionally, the value ``'mpl_colors'`` will
cause the cmap to loop through the default ``matplotlib``
colors (to match with the default output of the
:py:func:`~.drawing.utils.plot_spectra` method).
Note: if using more than one colormap, using the ``'single'``
option for ``colorbar`` is disallowed.
no_nans : bool, optional
If True, set nans to zero for plotting.
per_row : int, optional
The number of plots in each row
label : None, str, or list of str, optional
Control the title labeling of the plotted images.
If None, no titles will be shown.
If 'auto' (default), function will try to determine suitable titles
using Signal2D titles, falling back to the 'titles' option if no good
short titles are detected.
Works best if all images to be plotted have the same beginning
to their titles.
If 'titles', the title from each image's metadata.General.title
will be used.
If any other single str, images will be labeled in sequence using
that str as a prefix.
If a list of str, the list elements will be used to determine the
labels (repeated, if necessary).
labelwrap : int, optional
integer specifying the number of characters that will be used on
one line
If the function returns an unexpected blank figure, lower this
value to reduce overlap of the labels between each figure
suptitle : str, optional
Title to use at the top of the figure. If called with label='auto',
this parameter will override the automatically determined title.
suptitle_fontsize : int, optional
Font size to use for super title at top of figure
colorbar : {'multi', None, 'single'}
Controls the type of colorbars that are plotted.
If None, no colorbar is plotted.
If 'multi' (default), individual colorbars are plotted for each
(non-RGB) image
If 'single', all (non-RGB) images are plotted on the same scale,
and one colorbar is shown for all
centre_colormap : {"auto", True, False}
If True the centre of the color scheme is set to zero. This is
specially useful when using diverging color schemes. If "auto"
(default), diverging color schemes are automatically centred.
saturated_pixels: None, scalar or list of scalar, optional, default: 0
If list of scalar, the length should match the number of images to
show. If None is given in the list, the value is set to 0 for that image.
The percentage of pixels that are left out of the bounds. For
example, the low and high bounds of a value of 1 are the 0.5% and
99.5% percentiles. It must be in the [0, 100] range.
scalebar : {None, 'all', list of ints}, optional
If None (or False), no scalebars will be added to the images.
If 'all', scalebars will be added to all images.
If list of ints, scalebars will be added to each image specified.
scalebar_color : str, optional
A valid MPL color string; will be used as the scalebar color
axes_decor : {'all', 'ticks', 'off', None}, optional
Controls how the axes are displayed on each image; default is 'all'
If 'all', both ticks and axis labels will be shown
If 'ticks', no axis labels will be shown, but ticks/labels will
If 'off', all decorations and frame will be disabled
If None, no axis decorations will be shown, but ticks/frame will
padding : None or dict, optional
This parameter controls the spacing between images.
If None, default options will be used
Otherwise, supply a dictionary with the spacing options as
keywords and desired values as values
Values should be supplied as used in pyplot.subplots_adjust(),
and can be:
'left', 'bottom', 'right', 'top', 'wspace' (width),
and 'hspace' (height)
tight_layout : bool, optional
If true, hyperspy will attempt to improve image placement in
figure using matplotlib's tight_layout
If false, repositioning images inside the figure will be left as
an exercise for the user.
aspect : str or numeric, optional
If 'auto', aspect ratio is auto determined, subject to min_asp.
If 'square', image will be forced onto square display.
If 'equal', aspect ratio of 1 will be enforced.
If float (or int/long), given value will be used.
min_asp : float, optional
Minimum aspect ratio to be used when plotting images
namefrac_thresh : float, optional
Threshold to use for auto-labeling. This parameter controls how
much of the titles must be the same for the auto-shortening of
labels to activate. Can vary from 0 to 1. Smaller values
encourage shortening of titles by auto-labeling, while larger
values will require more overlap in titles before activating the
auto-label code.
fig : mpl figure, optional
If set, the images will be plotted to an existing MPL figure
vmin, vmax : scalar or list of scalar, optional, default: None
If list of scalar, the length should match the number of images to
show.
A list of scalar is not compatible with a single colorbar.
See vmin, vmax of matplotlib.imshow() for more details.
*args, **kwargs, optional
Additional arguments passed to matplotlib.imshow()
Returns
-------
axes_list : list
a list of subplot axes that hold the images
See Also
--------
plot_spectra : Plotting of multiple spectra
plot_signals : Plotting of multiple signals
plot_histograms : Compare signal histograms
Notes
-----
`interpolation` is a useful parameter to provide as a keyword
argument to control how the space between pixels is interpolated. A
value of ``'nearest'`` will cause no interpolation between pixels.
`tight_layout` is known to be quite brittle, so an option is provided
to disable it. Turn this option off if output is not as expected,
or try adjusting `label`, `labelwrap`, or `per_row`
"""
def __check_single_colorbar(cbar):
if cbar == 'single':
raise ValueError('Cannot use a single colorbar with multiple '
'colormaps. Please check for compatible '
'arguments.')
from hyperspy.drawing.widgets import ScaleBar
from hyperspy.misc import rgb_tools
from hyperspy.signal import BaseSignal
# Check that we have a hyperspy signal
im = [images] if not isinstance(images, (list, tuple)) else images
for image in im:
if not isinstance(image, BaseSignal):
raise ValueError("`images` must be a list of image signals or a "
"multi-dimensional signal."
" " + repr(type(images)) + " was given.")
# For list of EDS maps, transpose the BaseSignal
if isinstance(images, (list, tuple)):
images = [_transpose_if_required(image, 2) for image in images]
# If input is >= 1D signal (e.g. for multi-dimensional plotting),
# copy it and put it in a list so labeling works out as (x,y) when plotting
if isinstance(images,
BaseSignal) and images.axes_manager.navigation_dimension > 0:
images = [images._deepcopy_with_new_data(images.data)]
n = 0
for i, sig in enumerate(images):
if sig.axes_manager.signal_dimension != 2:
raise ValueError("This method only plots signals that are images. "
"The signal dimension must be equal to 2. "
"The signal at position " + repr(i) +
" was " + repr(sig) + ".")
# increment n by the navigation size, or by 1 if the navigation size is
# <= 0
n += (sig.axes_manager.navigation_size
if sig.axes_manager.navigation_size > 0
else 1)
# If no cmap given, get default colormap from pyplot:
if cmap is None:
cmap = [plt.get_cmap().name]
elif cmap == 'mpl_colors':
for n_color, c in enumerate(mpl.rcParams['axes.prop_cycle']):
make_cmap(colors=['#000000', c['color']],
name='mpl{}'.format(n_color))
cmap = ['mpl{}'.format(i) for i in
range(len(mpl.rcParams['axes.prop_cycle']))]
__check_single_colorbar(colorbar)
# cmap is list, tuple, or something else iterable (but not string):
elif hasattr(cmap, '__iter__') and not isinstance(cmap, str):
try:
cmap = [c.name for c in cmap] # convert colormap to string
except AttributeError:
cmap = [c for c in cmap] # c should be string if not colormap
__check_single_colorbar(colorbar)
elif isinstance(cmap, mpl.colors.Colormap):
cmap = [cmap.name] # convert single colormap to list with string
elif isinstance(cmap, str):
cmap = [cmap] # cmap is single string, so make it a list
else:
# Didn't understand cmap input, so raise error
raise ValueError('The provided cmap value was not understood. Please '
'check input values.')
# If any of the cmaps given are diverging, and auto-centering, set the
# appropriate flag:
if centre_colormap == "auto":
centre_colormaps = []
for c in cmap:
if c in MPL_DIVERGING_COLORMAPS:
centre_colormaps.append(True)
else:
centre_colormaps.append(False)
# if it was True, just convert to list
elif centre_colormap:
centre_colormaps = [True]
# likewise for false
elif not centre_colormap:
centre_colormaps = [False]
# finally, convert lists to cycle generators for adaptive length:
centre_colormaps = itertools.cycle(centre_colormaps)
cmap = itertools.cycle(cmap)
def _check_arg(arg, default_value, arg_name):
if isinstance(arg, list):
if len(arg) != n:
_logger.warning('The provided {} values are ignored because the '
'length of the list does not match the number of '
'images'.format(arg_name))
arg = [default_value] * n
else:
arg = [arg] * n
return arg
vmin = _check_arg(vmin, None, 'vmin')
vmax = _check_arg(vmax, None, 'vmax')
saturated_pixels = _check_arg(saturated_pixels, 0, 'saturated_pixels')
# Sort out the labeling:
div_num = 0
all_match = False
shared_titles = False
user_labels = False
if label is None:
pass
elif label == 'auto':
# Use some heuristics to try to get base string of similar titles
label_list = [x.metadata.General.title for x in images]
# Find the shortest common string between the image titles
# and pull that out as the base title for the sequence of images
# array in which to store arrays
res = np.zeros((len(label_list), len(label_list[0]) + 1))
res[:, 0] = 1
# j iterates the strings
for j in range(len(label_list)):
# i iterates length of substring test
for i in range(1, len(label_list[0]) + 1):
# stores whether or not characters in title match
res[j, i] = label_list[0][:i] in label_list[j]
# sum up the results (1 is True, 0 is False) and create
# a substring based on the minimum value (this will be
# the "smallest common string" between all the titles
if res.all():
basename = label_list[0]
div_num = len(label_list[0])
all_match = True
else:
div_num = int(min(np.sum(res, 1)))
basename = label_list[0][:div_num - 1]
all_match = False
# trim off any '(' or ' ' characters at end of basename
if div_num > 1:
while True:
if basename[len(basename) - 1] == '(':
basename = basename[:-1]
elif basename[len(basename) - 1] == ' ':
basename = basename[:-1]
else:
break
# namefrac is ratio of length of basename to the image name
# if it is high (e.g. over 0.5), we can assume that all images
# share the same base
if len(label_list[0]) > 0:
namefrac = float(len(basename)) / len(label_list[0])
else:
# If label_list[0] is empty, it means there was probably no
# title set originally, so nothing to share
namefrac = 0
if namefrac > namefrac_thresh:
# there was a significant overlap of label beginnings
shared_titles = True
# only use new suptitle if one isn't specified already
if suptitle is None:
suptitle = basename
else:
# there was not much overlap, so default back to 'titles' mode
shared_titles = False
label = 'titles'
div_num = 0
elif label == 'titles':
# Set label_list to each image's pre-defined title
label_list = [x.metadata.General.title for x in images]
elif isinstance(label, str):
# Set label_list to an indexed list, based off of label
label_list = [label + " " + repr(num) for num in range(n)]
elif isinstance(label, list) and all(
isinstance(x, str) for x in label):
label_list = label
user_labels = True
# If list of labels is longer than the number of images, just use the
# first n elements
if len(label_list) > n:
del label_list[n:]
if len(label_list) < n:
label_list *= (n // len(label_list)) + 1
del label_list[n:]
else:
raise ValueError("Did not understand input of labels.")
# Determine appropriate number of images per row
rows = int(np.ceil(n / float(per_row)))
if n < per_row:
per_row = n
# Set overall figure size and define figure (if not pre-existing)
if fig is None:
k = max(plt.rcParams['figure.figsize']) / max(per_row, rows)
f = plt.figure(figsize=(tuple(k * i for i in (per_row, rows))))
else:
f = fig
# Initialize list to hold subplot axes
axes_list = []
# Initialize list of rgb tags
isrgb = [False] * len(images)
# Check to see if there are any rgb images in list
# and tag them using the isrgb list
for i, img in enumerate(images):
if rgb_tools.is_rgbx(img.data):
isrgb[i] = True
# Determine how many non-rgb Images there are
non_rgb = list(itertools.compress(images, [not j for j in isrgb]))
if len(non_rgb) == 0 and colorbar is not None:
colorbar = None
warnings.warn("Sorry, colorbar is not implemented for RGB images.")
# Find global min and max values of all the non-rgb images for use with
# 'single' scalebar
if colorbar == 'single':
# get a g_saturated_pixels from saturated_pixels
if isinstance(saturated_pixels, list):
g_saturated_pixels = min(np.array([v for v in saturated_pixels]))
else:
g_saturated_pixels = saturated_pixels
# estimate a g_vmin and g_max from saturated_pixels
g_vmin, g_vmax = contrast_stretching(np.concatenate(
[i.data.flatten() for i in non_rgb]), g_saturated_pixels)
# if vmin and vmax are provided, override g_min and g_max
if isinstance(vmin, list):
_logger.warning('vmin has to be a scalar to be compatible with a '
'single colorbar')
else:
g_vmin = vmin if vmin is not None else g_vmin
if isinstance(vmax, list):
_logger.warning('vmax has to be a scalar to be compatible with a '
'single colorbar')
else:
g_vmax = vmax if vmax is not None else g_vmax
if next(centre_colormaps):
g_vmin, g_vmax = centre_colormap_values(g_vmin, g_vmax)
# Check if we need to add a scalebar for some of the images
if isinstance(scalebar, list) and all(isinstance(x, int)
for x in scalebar):
scalelist = True
else:
scalelist = False
idx = 0
ax_im_list = [0] * len(isrgb)
# Replot: create a list to store references to the images
replot_ims = []
# Loop through each image, adding subplot for each one
for i, ims in enumerate(images):
# Get handles for the signal axes and axes_manager
axes_manager = ims.axes_manager
if axes_manager.navigation_dimension > 0:
ims = ims._deepcopy_with_new_data(ims.data)
for j, im in enumerate(ims):
ax = f.add_subplot(rows, per_row, idx + 1)
axes_list.append(ax)
data = im.data
centre = next(centre_colormaps) # get next value for centreing
# Enable RGB plotting
if rgb_tools.is_rgbx(data):
data = rgb_tools.rgbx2regular_array(data, plot_friendly=True)
l_vmin, l_vmax = None, None
else:
data = im.data
# Find min and max for contrast
l_vmin, l_vmax = contrast_stretching(
data, saturated_pixels[idx])
l_vmin = vmin[idx] if vmin[idx] is not None else l_vmin
l_vmax = vmax[idx] if vmax[idx] is not None else l_vmax
if centre:
l_vmin, l_vmax = centre_colormap_values(l_vmin, l_vmax)
# Remove NaNs (if requested)
if no_nans:
data = np.nan_to_num(data)
# Get handles for the signal axes and axes_manager
axes_manager = im.axes_manager
axes = axes_manager.signal_axes
# Set dimensions of images
xaxis = axes[0]
yaxis = axes[1]
extent = (
xaxis.low_value,
xaxis.high_value,
yaxis.high_value,
yaxis.low_value,
)
if not isinstance(aspect, (int, float)) and aspect not in [
'auto', 'square', 'equal']:
_logger.warning("Did not understand aspect ratio input. "
"Using 'auto' as default.")
aspect = 'auto'
if aspect == 'auto':
if float(yaxis.size) / xaxis.size < min_asp:
factor = min_asp * float(xaxis.size) / yaxis.size
elif float(yaxis.size) / xaxis.size > min_asp ** -1:
factor = min_asp ** -1 * float(xaxis.size) / yaxis.size
else:
factor = 1
asp = np.abs(factor * float(xaxis.scale) / yaxis.scale)
elif aspect == 'square':
asp = abs(extent[1] - extent[0]) / abs(extent[3] - extent[2])
elif aspect == 'equal':
asp = 1
elif isinstance(aspect, (int, float)):
asp = aspect
if 'interpolation' not in kwargs.keys():
kwargs['interpolation'] = 'nearest'
# Get colormap for this image:
cm = next(cmap)
# Plot image data, using vmin and vmax to set bounds,
# or allowing them to be set automatically if using individual
# colorbars
if colorbar == 'single' and not isrgb[i]:
axes_im = ax.imshow(data,
cmap=cm,
extent=extent,
vmin=g_vmin, vmax=g_vmax,
aspect=asp,
*args, **kwargs)
ax_im_list[i] = axes_im
else:
axes_im = ax.imshow(data,
cmap=cm,
extent=extent,
vmin=l_vmin,
vmax=l_vmax,
aspect=asp,
*args, **kwargs)
ax_im_list[i] = axes_im
# If an axis trait is undefined, shut off :
if isinstance(xaxis.units, trait_base._Undefined) or \
isinstance(yaxis.units, trait_base._Undefined) or \
isinstance(xaxis.name, trait_base._Undefined) or \
isinstance(yaxis.name, trait_base._Undefined):
if axes_decor == 'all':
_logger.warning(
'Axes labels were requested, but one '
'or both of the '
'axes units and/or name are undefined. '
'Axes decorations have been set to '
'\'ticks\' instead.')
axes_decor = 'ticks'
# If all traits are defined, set labels as appropriate:
else:
ax.set_xlabel(axes[0].name + " axis (" + axes[0].units + ")")
ax.set_ylabel(axes[1].name + " axis (" + axes[1].units + ")")
if label:
if all_match:
title = ''
elif shared_titles:
title = label_list[i][div_num - 1:]
else:
if len(ims) == n:
# This is true if we are plotting just 1
# multi-dimensional Signal2D
title = label_list[idx]
elif user_labels:
title = label_list[idx]
else:
title = label_list[i]
if ims.axes_manager.navigation_size > 1 and not user_labels:
title += " %s" % str(ims.axes_manager.indices)
ax.set_title(textwrap.fill(title, labelwrap))
# Set axes decorations based on user input
set_axes_decor(ax, axes_decor)
# If using independent colorbars, add them
if colorbar == 'multi' and not isrgb[i]:
div = make_axes_locatable(ax)
cax = div.append_axes("right", size="5%", pad=0.05)
plt.colorbar(axes_im, cax=cax)
# Add scalebars as necessary
if (scalelist and idx in scalebar) or scalebar == 'all':
ax.scalebar = ScaleBar(
ax=ax,
units=axes[0].units,
color=scalebar_color,
)
# Replot: store references to the images
replot_ims.append(im)
idx += 1
# If using a single colorbar, add it, and do tight_layout, ensuring that
# a colorbar is only added based off of non-rgb Images:
if colorbar == 'single':
foundim = None
for i in range(len(isrgb)):
if (not isrgb[i]) and foundim is None:
foundim = i
if foundim is not None:
f.subplots_adjust(right=0.8)
cbar_ax = f.add_axes([0.9, 0.1, 0.03, 0.8])
f.colorbar(ax_im_list[foundim], cax=cbar_ax)
if tight_layout:
# tight_layout, leaving room for the colorbar
plt.tight_layout(rect=[0, 0, 0.9, 1])
elif tight_layout:
plt.tight_layout()
elif tight_layout:
plt.tight_layout()
# Set top bounds for shared titles and add suptitle
if suptitle:
f.subplots_adjust(top=0.85)
f.suptitle(suptitle, fontsize=suptitle_fontsize)
# If we want to plot scalebars, loop through the list of axes and add them
if scalebar is None or scalebar is False:
# Do nothing if no scalebars are called for
pass
elif scalebar == 'all':
# scalebars were taken care of in the plotting loop
pass
elif scalelist:
# scalebars were taken care of in the plotting loop
pass
else:
raise ValueError("Did not understand scalebar input. Must be None, "
"\'all\', or list of ints.")
# Adjust subplot spacing according to user's specification
if padding is not None:
plt.subplots_adjust(**padding)
# Replot: connect function
def on_dblclick(event):
# On the event of a double click, replot the selected subplot
if not event.inaxes:
return
if not event.dblclick:
return
subplots = [axi for axi in f.axes if isinstance(axi, mpl.axes.Subplot)]
inx = list(subplots).index(event.inaxes)
im = replot_ims[inx]
# Use some of the info in the subplot
cm = subplots[inx].images[0].get_cmap()
clim = subplots[inx].images[0].get_clim()
sbar = False
if (scalelist and inx in scalebar) or scalebar == 'all':
sbar = True
im.plot(colorbar=bool(colorbar),
vmin=clim[0],
vmax=clim[1],
no_nans=no_nans,
aspect=asp,
scalebar=sbar,
scalebar_color=scalebar_color,
cmap=cm)
f.canvas.mpl_connect('button_press_event', on_dblclick)
return axes_list
def set_axes_decor(ax, axes_decor):
if axes_decor == 'off':
ax.axis('off')
elif axes_decor == 'ticks':
ax.set_xlabel('')
ax.set_ylabel('')
elif axes_decor == 'all':
pass
elif axes_decor is None:
ax.set_xlabel('')
ax.set_ylabel('')
ax.set_xticklabels([])
ax.set_yticklabels([])
def make_cmap(colors, name='my_colormap', position=None,
bit=False, register=True):
"""
Create a matplotlib colormap with customized colors, optionally registering
it with matplotlib for simplified use.
Adapted from Chris Slocum's code at:
https://github.com/CSlocumWX/custom_colormap/blob/master/custom_colormaps.py
and used under the terms of that code's BSD-3 license
Parameters
----------
colors : iterable
list of either tuples containing rgb values, or html strings
Colors should be arranged so that the first color is the lowest
value for the colorbar and the last is the highest.
name : str
name of colormap to use when registering with matplotlib
position : None or iterable
list containing the values (from [0,1]) that dictate the position
of each color within the colormap. If None (default), the colors
will be equally-spaced within the colorbar.
bit : boolean
True if RGB colors are given in 8-bit [0 to 255] or False if given
in arithmetic basis [0 to 1] (default)
register : boolean
switch to control whether or not to register the custom colormap
with matplotlib in order to enable use by just the name string
"""
def _html_color_to_rgb(color_string):
""" convert #RRGGBB to an (R, G, B) tuple """
color_string = color_string.strip()
if color_string[0] == '#':
color_string = color_string[1:]
if len(color_string) != 6:
raise ValueError(
"input #{} is not in #RRGGBB format".format(color_string))
r, g, b = color_string[:2], color_string[2:4], color_string[4:]
r, g, b = [int(n, 16) / 255 for n in (r, g, b)]
return r, g, b
bit_rgb = np.linspace(0, 1, 256)
if position is None:
position = np.linspace(0, 1, len(colors))
else:
if len(position) != len(colors):
raise ValueError("position length must be the same as colors")
elif position[0] != 0 or position[-1] != 1:
raise ValueError("position must start with 0 and end with 1")
cdict = {'red': [], 'green': [], 'blue': []}
for pos, color in zip(position, colors):
if isinstance(color, str):
color = _html_color_to_rgb(color)
elif bit:
color = (bit_rgb[color[0]],
bit_rgb[color[1]],
bit_rgb[color[2]])
cdict['red'].append((pos, color[0], color[0]))
cdict['green'].append((pos, color[1], color[1]))
cdict['blue'].append((pos, color[2], color[2]))
cmap = mpl.colors.LinearSegmentedColormap(name, cdict, 256)
if register:
mpl.cm.register_cmap(name, cmap)
return cmap
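# Illustrative usage (added; not part of the original module; the colormap name
# below is arbitrary): build and register a black-to-red colormap, then use it
# by name anywhere matplotlib accepts a cmap string.
#   make_cmap(colors=['#000000', '#ff0000'], name='black_red')
#   plt.imshow(np.random.rand(8, 8), cmap='black_red')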
def plot_spectra(
spectra,
style='overlap',
color=None,
line_style=None,
padding=1.,
legend=None,
legend_picking=True,
legend_loc='upper right',
fig=None,
ax=None,
**kwargs):
"""Plot several spectra in the same figure.
Extra keyword arguments are passed to `matplotlib.figure`.
Parameters
----------
spectra : list of Signal1D or BaseSignal
Ordered spectra list of signal to plot. If `style` is "cascade" or
"mosaic" the spectra can have different size and axes. For `BaseSignal`
with navigation dimensions 1 and signal dimension 0, the signal will be
transposed to form a `Signal1D`.
style : {'overlap', 'cascade', 'mosaic', 'heatmap'}
The style of the plot.
color : matplotlib color or a list of them or `None`
Sets the color of the lines of the plots (no action on 'heatmap').
If a list, if its length is less than the number of spectra to plot,
the colors will be cycled. If `None`, use default matplotlib color
cycle.
line_style: matplotlib line style or a list of them or `None`
Sets the line style of the plots (no action on 'heatmap').
The main line style are '-','--','steps','-.',':'.
If a list, if its length is less than the number of
spectra to plot, line_style will be cycled.
If `None`, continuous lines ('-') are used.
padding : float, optional, default 1.0
Option for "cascade". 1 guarantees that there is not overlapping.
However, in many cases a value between 0 and 1 can produce a tighter
plot without overlapping. Negative values have the same effect but
reverse the order of the spectra without reversing the order of the
colors.
legend: None or list of str or 'auto'
If list of string, legend for "cascade" or title for "mosaic" is
displayed. If 'auto', the title of each spectrum (metadata.General.title)
is used.
legend_picking: bool
If true, a spectrum can be toggle on and off by clicking on
the legended line.
legend_loc : str or int
This parameter controls where the legend is placed on the figure;
see the pyplot.legend docstring for valid values
fig : matplotlib figure or None
If None, a default figure will be created. Specifying fig will
not work for the 'heatmap' style.
ax : matplotlib ax (subplot) or None
If None, a default ax will be created. Will not work for 'mosaic'
or 'heatmap' style.
**kwargs
remaining keyword arguments are passed to matplotlib.figure() or
matplotlib.subplots(). Has no effect on 'heatmap' style.
Example
-------
>>> s = hs.load("some_spectra")
>>> hs.plot.plot_spectra(s, style='cascade', color='red', padding=0.5)
To save the plot as a png-file
>>> hs.plot.plot_spectra(s).figure.savefig("test.png")
Returns
-------
ax: matplotlib axes or list of matplotlib axes
An array is returned when `style` is "mosaic".
"""
import hyperspy.signal
def _reverse_legend(ax_, legend_loc_):
"""
Reverse the ordering of a matplotlib legend (to be more consistent
with the default ordering of plots in the 'cascade' and 'overlap'
styles
Parameters
----------
ax_: matplotlib axes
legend_loc_: str or int
This parameter controls where the legend is placed on the
figure; see the pyplot.legend docstring for valid values
"""
l = ax_.get_legend()
labels = [lb.get_text() for lb in list(l.get_texts())]
handles = l.legendHandles
ax_.legend(handles[::-1], labels[::-1], loc=legend_loc_)
# Before v1.3 default would read the value from preferences.
if style == "default":
style = "overlap"
if color is not None:
if isinstance(color, str):
color = itertools.cycle([color])
elif hasattr(color, "__iter__"):
color = itertools.cycle(color)
else:
raise ValueError("Color must be None, a valid matplotlib color "
"string or a list of valid matplotlib colors.")
else:
if LooseVersion(mpl.__version__) >= "1.5.3":
color = itertools.cycle(
plt.rcParams['axes.prop_cycle'].by_key()["color"])
else:
color = itertools.cycle(plt.rcParams['axes.color_cycle'])
if line_style is not None:
if isinstance(line_style, str):
line_style = itertools.cycle([line_style])
elif hasattr(line_style, "__iter__"):
line_style = itertools.cycle(line_style)
else:
raise ValueError("line_style must be None, a valid matplotlib"
" line_style string or a list of valid matplotlib"
" line_style.")
else:
line_style = ['-'] * len(spectra)
if legend is not None:
if isinstance(legend, str):
if legend == 'auto':
legend = [spec.metadata.General.title for spec in spectra]
else:
raise ValueError("legend must be None, 'auto' or a list of"
" string")
elif hasattr(legend, "__iter__"):
legend = itertools.cycle(legend)
if style == 'overlap':
if fig is None:
fig = plt.figure(**kwargs)
if ax is None:
ax = fig.add_subplot(111)
_make_overlap_plot(spectra,
ax,
color=color,
line_style=line_style,)
if legend is not None:
ax.legend(legend, loc=legend_loc)
_reverse_legend(ax, legend_loc)
if legend_picking is True:
animate_legend(fig=fig, ax=ax)
elif style == 'cascade':
if fig is None:
fig = plt.figure(**kwargs)
if ax is None:
ax = fig.add_subplot(111)
_make_cascade_subplot(spectra,
ax,
color=color,
line_style=line_style,
padding=padding)
if legend is not None:
plt.legend(legend, loc=legend_loc)
_reverse_legend(ax, legend_loc)
elif style == 'mosaic':
default_fsize = plt.rcParams["figure.figsize"]
figsize = (default_fsize[0], default_fsize[1] * len(spectra))
fig, subplots = plt.subplots(
len(spectra), 1, figsize=figsize, **kwargs)
if legend is None:
legend = [legend] * len(spectra)
for spectrum, ax, color, line_style, legend in zip(
spectra, subplots, color, line_style, legend):
spectrum = _transpose_if_required(spectrum, 1)
_plot_spectrum(spectrum, ax, color=color, line_style=line_style)
ax.set_ylabel('Intensity')
if legend is not None:
ax.set_title(legend)
if not isinstance(spectra, hyperspy.signal.BaseSignal):
_set_spectrum_xlabel(spectrum, ax)
if isinstance(spectra, hyperspy.signal.BaseSignal):
_set_spectrum_xlabel(spectrum, ax)
fig.tight_layout()
elif style == 'heatmap':
if not isinstance(spectra, hyperspy.signal.BaseSignal):
import hyperspy.utils
spectra = [_transpose_if_required(spectrum, 1) for spectrum in
spectra]
spectra = hyperspy.utils.stack(spectra)
with spectra.unfolded():
ax = _make_heatmap_subplot(spectra)
ax.set_ylabel('Spectra')
ax = ax if style != "mosaic" else subplots
return ax
def animate_legend(fig=None, ax=None):
"""Animate the legend of a figure.
A spectrum can be toggle on and off by clicking on the legended line.
Parameters
----------
fig: None | matplotlib.figure
If None pick the current figure using "plt.gcf"
ax: None | matplotlib.axes
If None pick the current axes using "plt.gca".
Note
----
Code inspired from legend_picking.py in the matplotlib gallery
"""
if fig is None:
fig = plt.gcf()
if ax is None:
ax = plt.gca()
lines = ax.lines[::-1]
lined = dict()
leg = ax.get_legend()
for legline, origline in zip(leg.get_lines(), lines):
legline.set_picker(5) # 5 pts tolerance
lined[legline] = origline
def onpick(event):
# on the pick event, find the orig line corresponding to the
# legend proxy line, and toggle the visibility
legline = event.artist
if legline.axes == ax:
origline = lined[legline]
vis = not origline.get_visible()
origline.set_visible(vis)
# Change the alpha on the line in the legend so we can see what lines
# have been toggled
if vis:
legline.set_alpha(1.0)
else:
legline.set_alpha(0.2)
fig.canvas.draw_idle()
fig.canvas.mpl_connect('pick_event', onpick)
def plot_histograms(signal_list,
bins='freedman',
range_bins=None,
color=None,
line_style=None,
legend='auto',
fig=None,
**kwargs):
"""Plot the histogram of every signal in the list in the same figure.
This function creates a histogram for each signal and plot the list with
the `utils.plot.plot_spectra` function.
Parameters
----------
signal_list : iterable
Ordered spectra list to plot. If `style` is "cascade" or "mosaic"
the spectra can have different size and axes.
bins : int or list or str, optional
If bins is a string, then it must be one of:
'knuth' : use Knuth's rule to determine bins
'scotts' : use Scott's rule to determine bins
'freedman' : use the Freedman-Diaconis rule to determine bins
'blocks' : use bayesian blocks for dynamic bin widths
range_bins : tuple or None, optional.
the minimum and maximum range for the histogram. If not specified,
it will be (x.min(), x.max())
color : valid matplotlib color or a list of them or `None`, optional.
Sets the color of the lines of the plots. If a list, if its length is
less than the number of spectra to plot, the colors will be cycled.
If `None`, the default matplotlib color cycle is used.
line_style: valid matplotlib line style or a list of them or `None`,
optional.
The main line style are '-','--','steps','-.',':'.
If a list, if its length is less than the number of
spectra to plot, line_style will be cycled.
If `None`, the 'steps' line style is used.
legend: None or list of str or 'auto', optional.
Display a legend. If 'auto', the title of each spectrum
(metadata.General.title) is used.
legend_picking: bool, optional.
If true, a spectrum can be toggle on and off by clicking on
the legended line.
fig : matplotlib figure or None, optional.
If None, a default figure will be created.
**kwargs
other keyword arguments (weight and density) are described in
np.histogram().
Example
-------
Histograms of two random chi-square distributions
>>> img = hs.signals.Signal2D(np.random.chisquare(1,[10,10,100]))
>>> img2 = hs.signals.Signal2D(np.random.chisquare(2,[10,10,100]))
>>> hs.plot.plot_histograms([img,img2],legend=['hist1','hist2'])
Returns
-------
ax: matplotlib axes or list of matplotlib axes
An array is returned when `style` is "mosaic".
"""
hists = []
for obj in signal_list:
hists.append(obj.get_histogram(bins=bins,
range_bins=range_bins, **kwargs))
if line_style is None:
line_style = 'steps'
return plot_spectra(hists, style='overlap', color=color,
line_style=line_style, legend=legend, fig=fig)
| gpl-3.0 |
rg3915/django-experience | djexperience/settings.py | 1 | 3763 | import os
from decouple import config, Csv
from dj_database_url import parse as dburl
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config('SECRET_KEY')
DEBUG = config('DEBUG', default=False, cast=bool)
ALLOWED_HOSTS = config('ALLOWED_HOSTS', default=[], cast=Csv())
# Application definition
INSTALLED_APPS = [
# my apps
'djexperience.core',
# default django apps
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# third-party apps
'django_extensions',
'bootstrapform',
'widget_tweaks',
'daterange_filter',
'django_activeurl',
'import_export',
'django_tables2',
# my apps
'djexperience.bookstore',
'djexperience.company',
'djexperience.crm',
'djexperience.myemail',
'djexperience.product',
'djexperience.selling',
'djexperience.service',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'djexperience.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'djexperience.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
default_dburl = 'sqlite:///' + os.path.join(BASE_DIR, 'db.sqlite3')
DATABASES = {
'default': config('DATABASE_URL', default=default_dburl, cast=dburl),
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
EMAIL_HOST_USER = config('EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = config('EMAIL_HOST_PASSWORD')
EMAIL_USE_TLS = True
DEFAULT_FROM_EMAIL = config('DEFAULT_FROM_EMAIL')
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'pt-br'
TIME_ZONE = 'America/Sao_Paulo'
USE_I18N = True
USE_L10N = True
USE_TZ = True
USE_THOUSAND_SEPARATOR = True
DECIMAL_SEPARATOR = ','
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
LOGIN_URL = '/admin/login/'
| mit |
popazerty/obh-gui | lib/python/Components/RcModel.py | 30 | 1436 | import os
from Tools.HardwareInfo import HardwareInfo
from Tools.Directories import SCOPE_SKIN, resolveFilename
class RcModel:
RcModels = {}
def __init__(self):
self.model = HardwareInfo().get_device_model()
# cfg files has modelname rcname entries.
# modelname is boxname optionally followed by .rctype
for line in open((resolveFilename(SCOPE_SKIN, 'rc_models/rc_models.cfg')), 'r'):
if line.startswith(self.model):
m, r = line.strip().split()
self.RcModels[m] = r
def rcIsDefault(self):
# Default RC can only happen with DMM type remote controls...
return self.model.startswith('dm')
def getRcFile(self, ext):
# check for rc/type every time so rctype changes will be noticed
if os.path.exists('/proc/stb/ir/rc/type'):
rc = open('/proc/stb/ir/rc/type').read().strip()
modeltype = '%s.%s' % (self.model, rc)
else:
modeltype = None
if modeltype is not None and modeltype in self.RcModels.keys():
remote = self.RcModels[modeltype]
elif self.model in self.RcModels.keys():
remote = self.RcModels[self.model]
else:
remote = 'dmm' # default. Assume files for dmm exist
f = resolveFilename(SCOPE_SKIN, 'rc_models/' + remote + '.' + ext)
if not os.path.exists(f):
f = resolveFilename(SCOPE_SKIN, 'rc_models/dmm.' + ext)
return f
def getRcImg(self):
return self.getRcFile('png')
def getRcPositions(self):
return self.getRcFile('xml')
rc_model = RcModel()
| gpl-2.0 |
tgroh/incubator-beam | sdks/python/apache_beam/examples/complete/juliaset/setup.py | 4 | 4732 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Setup.py module for the workflow's worker utilities.
All the workflow related code is gathered in a package that will be built as a
source distribution, staged in the staging area for the workflow being run and
then installed in the workers when they start running.
This behavior is triggered by specifying the --setup_file command line option
when running the workflow for remote execution.
"""
from __future__ import print_function
import subprocess
from distutils.command.build import build as _build
import setuptools
# This class handles the pip install mechanism.
class build(_build): # pylint: disable=invalid-name
"""A build command class that will be invoked during package install.
The package built using the current setup.py will be staged and later
installed in the worker using `pip install package'. This class will be
instantiated during install for this specific scenario and will trigger
running the custom commands specified.
"""
sub_commands = _build.sub_commands + [('CustomCommands', None)]
# Some custom command to run during setup. The command is not essential for this
# workflow. It is used here as an example. Each command will spawn a child
# process. Typically, these commands will include steps to install non-Python
# packages. For instance, to install a C++-based library libjpeg62 the following
# two commands will have to be added:
#
# ['apt-get', 'update'],
# ['apt-get', '--assume-yes', 'install', 'libjpeg62'],
#
# First, note that there is no need to use the sudo command because the setup
# script runs with appropriate access.
# Second, if apt-get tool is used then the first command needs to be 'apt-get
# update' so the tool refreshes itself and initializes links to download
# repositories. Without this initial step the other apt-get install commands
# will fail with package not found errors. Note also --assume-yes option which
# shortcuts the interactive confirmation.
#
# Note that in this example custom commands will run after installing required
# packages. If you have a PyPI package that depends on one of the custom
# commands, move installation of the dependent package to the list of custom
# commands, e.g.:
#
# ['pip', 'install', 'my_package'],
#
# TODO(BEAM-3237): Output from the custom commands are missing from the logs.
# The output of custom commands (including failures) will be logged in the
# worker-startup log.
CUSTOM_COMMANDS = [
['echo', 'Custom command worked!']]
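# For illustration only (a hypothetical variant, not required by this example
# workflow): following the notes above, a command list that also installs a
# system package and a PyPI dependency could look like:
#
# CUSTOM_COMMANDS = [
#     ['apt-get', 'update'],
#     ['apt-get', '--assume-yes', 'install', 'libjpeg62'],
#     ['pip', 'install', 'my_package'],
#     ['echo', 'Custom command worked!']]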
class CustomCommands(setuptools.Command):
"""A setuptools Command class able to run arbitrary commands."""
def initialize_options(self):
pass
def finalize_options(self):
pass
def RunCustomCommand(self, command_list):
print('Running command: %s' % command_list)
p = subprocess.Popen(
command_list,
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
# Can use communicate(input='y\n'.encode()) if the command run requires
# some confirmation.
stdout_data, _ = p.communicate()
print('Command output: %s' % stdout_data)
if p.returncode != 0:
raise RuntimeError(
'Command %s failed: exit code: %s' % (command_list, p.returncode))
def run(self):
for command in CUSTOM_COMMANDS:
self.RunCustomCommand(command)
# Configure the required packages and scripts to install.
# Note that the Python Dataflow containers come with numpy already installed
# so this dependency will not trigger anything to be installed unless a version
# restriction is specified.
REQUIRED_PACKAGES = [
'numpy',
]
setuptools.setup(
name='juliaset',
version='0.0.1',
description='Julia set workflow package.',
install_requires=REQUIRED_PACKAGES,
packages=setuptools.find_packages(),
cmdclass={
# Command class instantiated and run during pip install scenarios.
'build': build,
'CustomCommands': CustomCommands,
}
)
| apache-2.0 |
linegpe/FYS3150 | Project4/expect_random_T1.py | 1 | 3161 | import numpy as np
import matplotlib.pyplot as plt
data1 = np.loadtxt("expect_random_T1.00.dat")
data2 = np.loadtxt("expect_ordered_T1.00.dat")
data3 = np.loadtxt("expect_random2_T2.40.dat")
data4 = np.loadtxt("expect_ordered2_T2.40.dat")
values1 = data1[0::1]
values2 = data2[0::1]
values3 = data3[0::1]
values4 = data4[0::1]
N1 = len(values1)
x1 = np.linspace(0,N1,N1)
N2 = len(values3)
x2 = np.linspace(0,N2,N2)
figure1 = plt.figure()
labels = figure1.add_subplot(111)
# Turn off axis lines and ticks of the big subplot
labels.spines['top'].set_color('none')
labels.spines['bottom'].set_color('none')
labels.spines['left'].set_color('none')
labels.spines['right'].set_color('none')
labels.tick_params(labelcolor='w', top='off', bottom='off', left='off', right='off')
plt.xlabel("Number of Monte Carlo cycles",fontsize=15)
plt.ylabel("Mean energy per spin",fontsize=15)
#figure1.yaxis.set_ticks_position(right)
#figure1.ylabel.set_ticks_position('left')
#figure1.yaxis.tick_right()
fig1 = figure1.add_subplot(211)
fig1.plot(x1,values1[:,0],label="Random initial spins, T=1")
fig1.plot(x1,values2[:,0],label="Ordered initial spins, T=1")
fig1.tick_params(axis='x', labelsize=15) #HOW TO PUT THIS ON THE RIGHT SIDE?
fig1.tick_params(axis='y', labelsize=15)
fig1.yaxis.tick_right()
#plt.ylabel(r"$\langle E\rangle /L^2$",fontsize=17)
#plt.xlabel("Number of Monte Carlo cycles",fontsize=15)
plt.legend()
plt.axis([0,N1,-3,0])
#plt.show()
fig2 = figure1.add_subplot(212)
fig2.plot(x2,values3[:,0],label="Random initial spins, T=2.4")
fig2.plot(x2,values4[:,0],label="Ordered initial spins, T=2.4")
fig2.tick_params(axis='x', labelsize=15)
fig2.tick_params(axis='y', labelsize=15)
fig2.yaxis.tick_right()
#plt.ylabel(r"$\langle E\rangle /L^2$",fontsize=15)
#plt.xlabel("Number of Monte Carlo cycles",fontsize=15)
plt.legend()
plt.axis([0,50000,-2,-0.4])
plt.show()
figure2 = plt.figure()
labels = figure2.add_subplot(111)
labels.spines['top'].set_color('none')
labels.spines['bottom'].set_color('none')
labels.spines['left'].set_color('none')
labels.spines['right'].set_color('none')
labels.tick_params(labelcolor='w', top='off', bottom='off', left='off', right='off')
plt.xlabel("Number of Monte Carlo cycles",fontsize=15)
plt.ylabel("Absolute magnetization per spin",fontsize=15)
fig1 = figure2.add_subplot(211)
fig1.plot(x1,values1[:,1],label="Random initial spins, T=1")
fig1.plot(x1,values2[:,1],label="Ordered initial spins, T=1")
fig1.tick_params(axis='x', labelsize=15)
fig1.tick_params(axis='y', labelsize=15)
fig1.yaxis.tick_right()
#fig2.ylabel(r"$abs(\langle M \rangle /L^2)$",fontsize=15)
#fig2.xlabel("Number of Monte Carlo cycles",fontsize=15)
plt.legend()
plt.axis([0,N1,0.2,1.6])
#plt.show()
fig2 = figure2.add_subplot(212)
fig2.plot(x2,values3[:,1],label="Random initial spins, T=2.4")
fig2.plot(x2,values4[:,1],label="Ordered initial spins, T=2.4")
fig2.tick_params(axis='x', labelsize=15)
fig2.tick_params(axis='y', labelsize=15)
fig2.yaxis.tick_right()
#plt.ylabel(r"$abs(\langle M\rangle / L^2)$",fontsize=15)
#plt.xlabel("Number of Monte Carlo cycles",fontsize=15)
plt.legend()
#plt.axis([0,8e6,-0.1,1.4])
plt.show() | gpl-3.0 |
shoyer/xray | xarray/backends/locks.py | 1 | 5397 | import multiprocessing
import threading
import weakref
from typing import Any, MutableMapping
try:
from dask.utils import SerializableLock
except ImportError:
# no need to worry about serializing the lock
SerializableLock = threading.Lock
try:
from dask.distributed import Lock as DistributedLock
except ImportError:
DistributedLock = None
# Locks used by multiple backends.
# Neither HDF5 nor the netCDF-C library are thread-safe.
HDF5_LOCK = SerializableLock()
NETCDFC_LOCK = SerializableLock()
_FILE_LOCKS = weakref.WeakValueDictionary() # type: MutableMapping[Any, threading.Lock] # noqa
def _get_threaded_lock(key):
try:
lock = _FILE_LOCKS[key]
except KeyError:
lock = _FILE_LOCKS[key] = threading.Lock()
return lock
def _get_multiprocessing_lock(key):
# TODO: make use of the key -- maybe use locket.py?
# https://github.com/mwilliamson/locket.py
del key # unused
return multiprocessing.Lock()
_LOCK_MAKERS = {
None: _get_threaded_lock,
'threaded': _get_threaded_lock,
'multiprocessing': _get_multiprocessing_lock,
'distributed': DistributedLock,
}
def _get_lock_maker(scheduler=None):
"""Returns an appropriate function for creating resource locks.
Parameters
----------
scheduler : str or None
Dask scheduler being used.
See Also
--------
dask.utils.get_scheduler_lock
"""
return _LOCK_MAKERS[scheduler]
def _get_scheduler(get=None, collection=None):
"""Determine the dask scheduler that is being used.
None is returned if no dask scheduler is active.
See also
--------
dask.base.get_scheduler
"""
try:
# dask 0.18.1 and later
from dask.base import get_scheduler
actual_get = get_scheduler(get, collection)
except ImportError:
try:
from dask.utils import effective_get
actual_get = effective_get(get, collection)
except ImportError:
return None
try:
from dask.distributed import Client
if isinstance(actual_get.__self__, Client):
return 'distributed'
except (ImportError, AttributeError):
try:
import dask.multiprocessing
if actual_get == dask.multiprocessing.get:
return 'multiprocessing'
else:
return 'threaded'
except ImportError:
return 'threaded'
def get_write_lock(key):
"""Get a scheduler appropriate lock for writing to the given resource.
Parameters
----------
key : str
Name of the resource for which to acquire a lock. Typically a filename.
Returns
-------
Lock object that can be used like a threading.Lock object.
"""
scheduler = _get_scheduler()
lock_maker = _get_lock_maker(scheduler)
return lock_maker(key)
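# Illustrative usage (sketch; the filename is hypothetical): fetch a lock that
# matches the active dask scheduler and hold it around a write.
#
#   lock = get_write_lock('output.nc')
#   with lock:
#       ...  # write to the shared resource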
def acquire(lock, blocking=True):
"""Acquire a lock, possibly in a non-blocking fashion.
Includes backwards compatibility hacks for old versions of Python, dask
and dask-distributed.
"""
if blocking:
# no arguments needed
return lock.acquire()
elif DistributedLock is not None and isinstance(lock, DistributedLock):
# distributed.Lock doesn't support the blocking argument yet:
# https://github.com/dask/distributed/pull/2412
return lock.acquire(timeout=0)
else:
# "blocking" keyword argument not supported for:
# - threading.Lock on Python 2.
# - dask.SerializableLock with dask v1.0.0 or earlier.
# - multiprocessing.Lock calls the argument "block" instead.
return lock.acquire(blocking)
class CombinedLock:
"""A combination of multiple locks.
Like a locked door, a CombinedLock is locked if any of its constituent
locks are locked.
"""
def __init__(self, locks):
self.locks = tuple(set(locks)) # remove duplicates
def acquire(self, blocking=True):
return all(acquire(lock, blocking=blocking) for lock in self.locks)
def release(self):
for lock in self.locks:
lock.release()
def __enter__(self):
for lock in self.locks:
lock.__enter__()
def __exit__(self, *args):
for lock in self.locks:
lock.__exit__(*args)
def locked(self):
        return any(lock.locked() for lock in self.locks)  # call locked(); the bound method itself is always truthy
def __repr__(self):
return "CombinedLock(%r)" % list(self.locks)
class DummyLock:
"""DummyLock provides the lock API without any actual locking."""
def acquire(self, blocking=True):
pass
def release(self):
pass
def __enter__(self):
pass
def __exit__(self, *args):
pass
def locked(self):
return False
def combine_locks(locks):
"""Combine a sequence of locks into a single lock."""
all_locks = []
for lock in locks:
if isinstance(lock, CombinedLock):
all_locks.extend(lock.locks)
elif lock is not None:
all_locks.append(lock)
num_locks = len(all_locks)
if num_locks > 1:
return CombinedLock(all_locks)
elif num_locks == 1:
return all_locks[0]
else:
return DummyLock()
def ensure_lock(lock):
"""Ensure that the given object is a lock."""
if lock is None or lock is False:
return DummyLock()
return lock
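# Rough sketch of how these helpers compose, based on the definitions above:
#
#   combine_locks([HDF5_LOCK, None])          # -> HDF5_LOCK itself
#   combine_locks([HDF5_LOCK, NETCDFC_LOCK])  # -> CombinedLock over both
#   combine_locks([])                         # -> DummyLock()
#   ensure_lock(None)                         # -> DummyLock()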
| apache-2.0 |
vodik/pacman | test/pacman/pmpkg.py | 2 | 7696 | #! /usr/bin/python2
#
# Copyright (c) 2006 by Aurelien Foret <[email protected]>
# Copyright (c) 2006-2013 Pacman Development Team <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import tempfile
import stat
import shutil
from StringIO import StringIO
import tarfile
import util
class pmpkg(object):
"""Package object.
Object holding data from an Arch Linux package.
"""
def __init__(self, name, version = "1.0-1"):
self.path = "" #the path of the generated package
# desc
self.name = name
self.version = version
self.desc = ""
self.groups = []
self.url = ""
self.license = []
self.arch = ""
self.builddate = ""
self.installdate = ""
self.packager = ""
self.size = 0
self.csize = 0
self.isize = 0
self.reason = 0
self.md5sum = "" # sync only
self.pgpsig = "" # sync only
self.replaces = []
self.depends = []
self.optdepends = []
self.conflicts = []
self.provides = []
# files
self.files = []
self.backup = []
# install
self.install = {
"pre_install": "",
"post_install": "",
"pre_remove": "",
"post_remove": "",
"pre_upgrade": "",
"post_upgrade": "",
}
self.path = None
self.finalized = False
def __str__(self):
s = ["%s" % self.fullname()]
s.append("description: %s" % self.desc)
s.append("url: %s" % self.url)
s.append("files: %s" % " ".join(self.files))
s.append("reason: %d" % self.reason)
return "\n".join(s)
def fullname(self):
"""Long name of a package.
Returns a string formatted as follows: "pkgname-pkgver".
"""
return "%s-%s" % (self.name, self.version)
def filename(self):
"""File name of a package, including its extension.
Returns a string formatted as follows: "pkgname-pkgver.PKG_EXT_PKG".
"""
return "%s%s" % (self.fullname(), util.PM_EXT_PKG)
@staticmethod
def parse_filename(name):
filename = name
if filename[-1] == "*":
filename = filename.rstrip("*")
if filename.find(" -> ") != -1:
filename, extra = filename.split(" -> ")
elif filename.find("|") != -1:
filename, extra = filename.split("|")
return filename
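    # Worked examples for parse_filename (illustrative only):
    #   "usr/bin/foo -> ../lib/foo"  ->  "usr/bin/foo"   (symlink decoration stripped)
    #   "etc/foo.conf*"              ->  "etc/foo.conf"  (trailing '*' stripped)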
def makepkg(self, path):
"""Creates an Arch Linux package archive.
A package archive is generated in the location 'path', based on the data
from the object.
"""
archive_files = []
# .PKGINFO
data = ["pkgname = %s" % self.name]
data.append("pkgver = %s" % self.version)
data.append("pkgdesc = %s" % self.desc)
data.append("url = %s" % self.url)
data.append("builddate = %s" % self.builddate)
data.append("packager = %s" % self.packager)
data.append("size = %s" % self.size)
if self.arch:
data.append("arch = %s" % self.arch)
for i in self.license:
data.append("license = %s" % i)
for i in self.replaces:
data.append("replaces = %s" % i)
for i in self.groups:
data.append("group = %s" % i)
for i in self.depends:
data.append("depend = %s" % i)
for i in self.optdepends:
data.append("optdepend = %s" % i)
for i in self.conflicts:
data.append("conflict = %s" % i)
for i in self.provides:
data.append("provides = %s" % i)
for i in self.backup:
data.append("backup = %s" % i)
archive_files.append((".PKGINFO", "\n".join(data)))
# .INSTALL
if any(self.install.values()):
archive_files.append((".INSTALL", self.installfile()))
self.path = os.path.join(path, self.filename())
util.mkdir(os.path.dirname(self.path))
# Generate package metadata
tar = tarfile.open(self.path, "w:gz")
for name, data in archive_files:
info = tarfile.TarInfo(name)
info.size = len(data)
tar.addfile(info, StringIO(data))
# Generate package file system
for name in self.files:
fileinfo = util.getfileinfo(name)
info = tarfile.TarInfo(fileinfo["filename"])
if fileinfo["hasperms"]:
info.mode = fileinfo["perms"]
elif fileinfo["isdir"]:
info.mode = 0o755
if fileinfo["isdir"]:
info.type = tarfile.DIRTYPE
tar.addfile(info)
elif fileinfo["islink"]:
info.type = tarfile.SYMTYPE
info.linkname = fileinfo["link"]
tar.addfile(info)
else:
# TODO wow what a hack, adding a newline to match mkfile?
filedata = name + "\n"
info.size = len(filedata)
tar.addfile(info, StringIO(filedata))
tar.close()
def install_package(self, root):
"""Install the package in the given root."""
for f in self.files:
util.mkfile(root, f, f)
path = os.path.join(root, f)
if os.path.isfile(path):
os.utime(path, (355, 355))
def filelist(self):
"""Generate a list of package files."""
return sorted([self.parse_filename(f) for f in self.files])
def finalize(self):
"""Perform any necessary operations to ready the package for use."""
if self.finalized:
return
# add missing parent dirs to file list
# use bare file names so trailing ' -> ', '*', etc don't throw off the
# checks for existing files
file_names = self.filelist()
for name in list(file_names):
if os.path.isabs(name):
raise ValueError("Absolute path in filelist '%s'." % name)
name = os.path.dirname(name.rstrip("/"))
while name:
if name in file_names:
# path exists as both a file and a directory
raise ValueError("Duplicate path in filelist '%s'." % name)
elif name + "/" in file_names:
# path was either manually included or already processed
break
else:
file_names.append(name + "/")
self.files.append(name + "/")
name = os.path.dirname(name)
self.files.sort()
self.finalized = True
def local_backup_entries(self):
return ["%s\t%s" % (self.parse_filename(i), util.mkmd5sum(i)) for i in self.backup]
def installfile(self):
data = []
for key, value in self.install.items():
if value:
data.append("%s() {\n%s\n}\n" % (key, value))
return "\n".join(data)
# vim: set ts=4 sw=4 et:
| gpl-2.0 |
notifico/notifico | notifico/services/messages.py | 3 | 1916 | # -*- coding: utf8 -*-
__all__ = ('MessageService',)
import json
class MessageService(object):
#: Key name for the outgoing message queue.
key_queue_messages = 'queue_message'
#: Key name for recent messages.
key_recent_messages = 'recent_messages'
def __init__(self, redis=None):
self._redis = redis
@property
def r(self):
return self._redis
def recent_messages(self, start=0, stop=-1):
"""
Returns a list of recent messages from `start` to `stop`.
"""
if not self.r:
return []
return [
json.loads(m) for m in self.r.lrange(
self.key_recent_messages, start, stop
)
]
def send_message(self, message, channel):
"""
Sends `message` to `channel`.
"""
final_message = {
# What we're delivering.
'type': 'message',
# Contents of the message.
'payload': {
'msg': message.replace('\n', '').replace('\r', '')
},
# Destination.
'channel': {
'channel': channel.channel,
'host': channel.host,
'port': channel.port,
'ssl': channel.ssl
}
}
message_dump = json.dumps(final_message)
self.r.rpush(self.key_queue_messages, message_dump)
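    # Illustrative usage (hypothetical redis client and channel object):
    #
    #   ms = MessageService(redis=redis.StrictRedis())
    #   ms.send_message('Build #12 passed', channel)
    #
    # where `channel` exposes the .channel, .host, .port and .ssl attributes
    # read above.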
def log_message(self, message, project, log_cap=200):
"""
Log up to `log_cap` messages,
"""
final_message = {
'msg': message,
'project_id': project.id,
'owner_id': project.owner.id
}
message_dump = json.dumps(final_message)
with self.r.pipeline() as pipe:
pipe.lpush(self.key_recent_messages, message_dump)
pipe.ltrim(self.key_recent_messages, 0, log_cap)
pipe.execute()
| mit |
brahle/eval2 | scripts/haski/actions/reviewaction.py | 1 | 1578 | #!/usr/bin/env python3.2
# Copyright 2011 Bruno Rahle
#
# This file is part of Evaluator.
#
# Evaluator is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Evaluator is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with Evaluator. If not, see
# <http://www.gnu.org/licenses/>.
from actions.baseaction import BaseHaskiAction
import argparse
class ReviewAction(BaseHaskiAction):
"""This class is the class that does linting work.
"""
RB_ID_STR = 'reviewboard id'
def __call__(self, params):
"""Fetches the desired revision and then sends it to reviewboard.
"""
commit = self.get_commit(params)
if not params.skip_lint:
commit.lint(params)
rb_id = commit.review()
if params.revision != 'HEAD':
if self.RB_ID_STR not in commit.message.fields:
print('[WARNING] Please edit the message to incorporate',
'`ReviewBoardID` field.')
else:
commit.message.set_field(self.RB_ID_STR, rb_id)
commit.amend()
def main():
pass
if __name__ == '__main__':
main()
| agpl-3.0 |
crosswalk-project/chromium-crosswalk-efl | tools/telemetry/telemetry/core/platform/profiler/android_profiling_helper_unittest.py | 26 | 4767 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import pickle
import re
import shutil
import tempfile
import unittest
from telemetry import benchmark
from telemetry.core import util
from telemetry.core.platform.profiler import android_profiling_helper
from telemetry.unittest import simple_mock
from telemetry.unittest import tab_test_case
def _GetLibrariesMappedIntoProcesses(device, pids):
libs = set()
for pid in pids:
maps_file = '/proc/%d/maps' % pid
maps = device.ReadFile(maps_file, as_root=True)
for map_line in maps:
lib = re.match(r'.*\s(/.*[.]so)$', map_line)
if lib:
libs.add(lib.group(1))
return libs
class TestAndroidProfilingHelper(unittest.TestCase):
def testGetRequiredLibrariesForPerfProfile(self):
perf_output = os.path.join(
util.GetUnittestDataDir(), 'sample_perf_report_output.txt')
with open(perf_output) as f:
perf_output = f.read()
mock_popen = simple_mock.MockObject()
mock_popen.ExpectCall('communicate').WillReturn([None, perf_output])
mock_subprocess = simple_mock.MockObject()
mock_subprocess.ExpectCall(
'Popen').WithArgs(simple_mock.DONT_CARE).WillReturn(mock_popen)
mock_subprocess.SetAttribute('PIPE', simple_mock.MockObject())
real_subprocess = android_profiling_helper.subprocess
android_profiling_helper.subprocess = mock_subprocess
try:
libs = android_profiling_helper.GetRequiredLibrariesForPerfProfile('foo')
self.assertEqual(libs, set([
'/data/app-lib/com.google.android.apps.chrome-2/libchrome.2016.0.so',
'/system/lib/libart.so',
'/system/lib/libc.so',
'/system/lib/libm.so']))
finally:
android_profiling_helper.subprocess = real_subprocess
@benchmark.Enabled('android')
def testGetRequiredLibrariesForVTuneProfile(self):
vtune_db_output = os.path.join(
util.GetUnittestDataDir(), 'sample_vtune_db_output')
with open(vtune_db_output, 'rb') as f:
vtune_db_output = pickle.load(f)
mock_cursor = simple_mock.MockObject()
mock_cursor.ExpectCall(
'execute').WithArgs(simple_mock.DONT_CARE).WillReturn(vtune_db_output)
mock_conn = simple_mock.MockObject()
mock_conn.ExpectCall('cursor').WillReturn(mock_cursor)
mock_conn.ExpectCall('close')
mock_sqlite3 = simple_mock.MockObject()
mock_sqlite3.ExpectCall(
'connect').WithArgs(simple_mock.DONT_CARE).WillReturn(mock_conn)
real_sqlite3 = android_profiling_helper.sqlite3
android_profiling_helper.sqlite3 = mock_sqlite3
try:
libs = android_profiling_helper.GetRequiredLibrariesForVTuneProfile('foo')
self.assertEqual(libs, set([
'/data/app-lib/com.google.android.apps.chrome-1/libchrome.2019.0.so',
'/system/lib/libdvm.so',
'/system/lib/libc.so',
'/system/lib/libm.so']))
finally:
android_profiling_helper.sqlite3 = real_sqlite3
class TestAndroidProfilingHelperTabTestCase(tab_test_case.TabTestCase):
def setUp(self):
super(TestAndroidProfilingHelperTabTestCase, self).setUp()
# pylint: disable=W0212
browser_backend = self._browser._browser_backend
self._device = browser_backend._adb.device()
@benchmark.Enabled('android')
def testCreateSymFs(self):
# pylint: disable=W0212
browser_pid = self._browser._browser_backend.pid
pids = ([browser_pid] +
self._browser._platform_backend.GetChildPids(browser_pid))
libs = _GetLibrariesMappedIntoProcesses(self._device, pids)
assert libs
symfs_dir = tempfile.mkdtemp()
try:
kallsyms = android_profiling_helper.CreateSymFs(self._device, symfs_dir,
libs)
# Check that we have kernel symbols.
assert os.path.exists(kallsyms)
is_unstripped = re.compile('^/data/app/.*\.so$')
has_unstripped = False
# Check that all requested libraries are present.
for lib in libs:
has_unstripped = has_unstripped or is_unstripped.match(lib)
assert os.path.exists(os.path.join(symfs_dir, lib[1:])), \
'%s not found in symfs' % lib
# Make sure we found at least one unstripped library.
assert has_unstripped
finally:
shutil.rmtree(symfs_dir)
@benchmark.Enabled('android')
def testGetToolchainBinaryPath(self):
with tempfile.NamedTemporaryFile() as libc:
self._device.PullFile('/system/lib/libc.so', libc.name)
path = android_profiling_helper.GetToolchainBinaryPath(libc.name,
'objdump')
assert os.path.exists(path)
| bsd-3-clause |
skidzo/sympy | sympy/simplify/tests/test_powsimp.py | 9 | 11985 | from sympy import (
symbols, powsimp, symbols, MatrixSymbol, sqrt, pi, Mul, gamma, Function,
S, I, exp, simplify, sin, E, log, hyper, Symbol, Dummy, powdenest, root,
Rational)
from sympy.abc import x, y, z, t, a, b, c, d, e, f, g, h, i, k
def test_powsimp():
x, y, z, n = symbols('x,y,z,n')
f = Function('f')
assert powsimp( 4**x * 2**(-x) * 2**(-x) ) == 1
assert powsimp( (-4)**x * (-2)**(-x) * 2**(-x) ) == 1
assert powsimp(
f(4**x * 2**(-x) * 2**(-x)) ) == f(4**x * 2**(-x) * 2**(-x))
assert powsimp( f(4**x * 2**(-x) * 2**(-x)), deep=True ) == f(1)
assert exp(x)*exp(y) == exp(x)*exp(y)
assert powsimp(exp(x)*exp(y)) == exp(x + y)
assert powsimp(exp(x)*exp(y)*2**x*2**y) == (2*E)**(x + y)
assert powsimp(exp(x)*exp(y)*2**x*2**y, combine='exp') == \
exp(x + y)*2**(x + y)
assert powsimp(exp(x)*exp(y)*exp(2)*sin(x) + sin(y) + 2**x*2**y) == \
exp(2 + x + y)*sin(x) + sin(y) + 2**(x + y)
assert powsimp(sin(exp(x)*exp(y))) == sin(exp(x)*exp(y))
assert powsimp(sin(exp(x)*exp(y)), deep=True) == sin(exp(x + y))
assert powsimp(x**2*x**y) == x**(2 + y)
# This should remain factored, because 'exp' with deep=True is supposed
# to act like old automatic exponent combining.
assert powsimp((1 + E*exp(E))*exp(-E), combine='exp', deep=True) == \
(1 + exp(1 + E))*exp(-E)
assert powsimp((1 + E*exp(E))*exp(-E), deep=True) == \
(1 + exp(1 + E))*exp(-E)
assert powsimp((1 + E*exp(E))*exp(-E)) == (1 + exp(1 + E))*exp(-E)
assert powsimp((1 + E*exp(E))*exp(-E), combine='exp') == \
(1 + exp(1 + E))*exp(-E)
assert powsimp((1 + E*exp(E))*exp(-E), combine='base') == \
(1 + E*exp(E))*exp(-E)
x, y = symbols('x,y', nonnegative=True)
n = Symbol('n', real=True)
assert powsimp(y**n * (y/x)**(-n)) == x**n
assert powsimp(x**(x**(x*y)*y**(x*y))*y**(x**(x*y)*y**(x*y)), deep=True) \
== (x*y)**(x*y)**(x*y)
assert powsimp(2**(2**(2*x)*x), deep=False) == 2**(2**(2*x)*x)
assert powsimp(2**(2**(2*x)*x), deep=True) == 2**(x*4**x)
assert powsimp(
exp(-x + exp(-x)*exp(-x*log(x))), deep=False, combine='exp') == \
exp(-x + exp(-x)*exp(-x*log(x)))
assert powsimp(
exp(-x + exp(-x)*exp(-x*log(x))), deep=False, combine='exp') == \
exp(-x + exp(-x)*exp(-x*log(x)))
assert powsimp((x + y)/(3*z), deep=False, combine='exp') == (x + y)/(3*z)
assert powsimp((x/3 + y/3)/z, deep=True, combine='exp') == (x/3 + y/3)/z
assert powsimp(exp(x)/(1 + exp(x)*exp(y)), deep=True) == \
exp(x)/(1 + exp(x + y))
assert powsimp(x*y**(z**x*z**y), deep=True) == x*y**(z**(x + y))
assert powsimp((z**x*z**y)**x, deep=True) == (z**(x + y))**x
assert powsimp(x*(z**x*z**y)**x, deep=True) == x*(z**(x + y))**x
p = symbols('p', positive=True)
assert powsimp((1/x)**log(2)/x) == (1/x)**(1 + log(2))
assert powsimp((1/p)**log(2)/p) == p**(-1 - log(2))
# coefficient of exponent can only be simplified for positive bases
assert powsimp(2**(2*x)) == 4**x
assert powsimp((-1)**(2*x)) == (-1)**(2*x)
i = symbols('i', integer=True)
assert powsimp((-1)**(2*i)) == 1
assert powsimp((-1)**(-x)) != (-1)**x # could be 1/((-1)**x), but is not
# force=True overrides assumptions
assert powsimp((-1)**(2*x), force=True) == 1
# rational exponents allow combining of negative terms
w, n, m = symbols('w n m', negative=True)
e = i/a # not a rational exponent if `a` is unknown
ex = w**e*n**e*m**e
assert powsimp(ex) == m**(i/a)*n**(i/a)*w**(i/a)
e = i/3
ex = w**e*n**e*m**e
assert powsimp(ex) == (-1)**i*(-m*n*w)**(i/3)
e = (3 + i)/i
ex = w**e*n**e*m**e
assert powsimp(ex) == (-1)**(3*e)*(-m*n*w)**e
eq = x**(2*a/3)
# eq != (x**a)**(2/3) (try x = -1 and a = 3 to see)
assert powsimp(eq).exp == eq.exp == 2*a/3
# powdenest goes the other direction
assert powsimp(2**(2*x)) == 4**x
assert powsimp(exp(p/2)) == exp(p/2)
# issue 6368
eq = Mul(*[sqrt(Dummy(imaginary=True)) for i in range(3)])
assert powsimp(eq) == eq and eq.is_Mul
assert all(powsimp(e) == e for e in (sqrt(x**a), sqrt(x**2)))
# issue 8836
assert str( powsimp(exp(I*pi/3)*root(-1,3)) ) == '(-1)**(2/3)'
def test_powsimp_negated_base():
assert powsimp((-x + y)/sqrt(x - y)) == -sqrt(x - y)
assert powsimp((-x + y)*(-z + y)/sqrt(x - y)/sqrt(z - y)) == sqrt(x - y)*sqrt(z - y)
p = symbols('p', positive=True)
assert powsimp((-p)**a/p**a) == (-1)**a
n = symbols('n', negative=True)
assert powsimp((-n)**a/n**a) == (-1)**a
# if x is 0 then the lhs is 0**a*oo**a which is not (-1)**a
assert powsimp((-x)**a/x**a) != (-1)**a
def test_powsimp_nc():
x, y, z = symbols('x,y,z')
A, B, C = symbols('A B C', commutative=False)
assert powsimp(A**x*A**y, combine='all') == A**(x + y)
assert powsimp(A**x*A**y, combine='base') == A**x*A**y
assert powsimp(A**x*A**y, combine='exp') == A**(x + y)
assert powsimp(A**x*B**x, combine='all') == A**x*B**x
assert powsimp(A**x*B**x, combine='base') == A**x*B**x
assert powsimp(A**x*B**x, combine='exp') == A**x*B**x
assert powsimp(B**x*A**x, combine='all') == B**x*A**x
assert powsimp(B**x*A**x, combine='base') == B**x*A**x
assert powsimp(B**x*A**x, combine='exp') == B**x*A**x
assert powsimp(A**x*A**y*A**z, combine='all') == A**(x + y + z)
assert powsimp(A**x*A**y*A**z, combine='base') == A**x*A**y*A**z
assert powsimp(A**x*A**y*A**z, combine='exp') == A**(x + y + z)
assert powsimp(A**x*B**x*C**x, combine='all') == A**x*B**x*C**x
assert powsimp(A**x*B**x*C**x, combine='base') == A**x*B**x*C**x
assert powsimp(A**x*B**x*C**x, combine='exp') == A**x*B**x*C**x
assert powsimp(B**x*A**x*C**x, combine='all') == B**x*A**x*C**x
assert powsimp(B**x*A**x*C**x, combine='base') == B**x*A**x*C**x
assert powsimp(B**x*A**x*C**x, combine='exp') == B**x*A**x*C**x
def test_issue_6440():
assert powsimp(16*2**a*8**b) == 2**(a + 3*b + 4)
def test_powdenest():
from sympy import powdenest
from sympy.abc import x, y, z, a, b
p, q = symbols('p q', positive=True)
i, j = symbols('i,j', integer=True)
assert powdenest(x) == x
assert powdenest(x + 2*(x**(2*a/3))**(3*x)) == (x + 2*(x**(2*a/3))**(3*x))
assert powdenest((exp(2*a/3))**(3*x)) # -X-> (exp(a/3))**(6*x)
assert powdenest((x**(2*a/3))**(3*x)) == ((x**(2*a/3))**(3*x))
assert powdenest(exp(3*x*log(2))) == 2**(3*x)
assert powdenest(sqrt(p**2)) == p
i, j = symbols('i,j', integer=True)
eq = p**(2*i)*q**(4*i)
assert powdenest(eq) == (p*q**2)**(2*i)
# -X-> (x**x)**i*(x**x)**j == x**(x*(i + j))
assert powdenest((x**x)**(i + j))
assert powdenest(exp(3*y*log(x))) == x**(3*y)
assert powdenest(exp(y*(log(a) + log(b)))) == (a*b)**y
assert powdenest(exp(3*(log(a) + log(b)))) == a**3*b**3
assert powdenest(((x**(2*i))**(3*y))**x) == ((x**(2*i))**(3*y))**x
assert powdenest(((x**(2*i))**(3*y))**x, force=True) == x**(6*i*x*y)
assert powdenest(((x**(2*a/3))**(3*y/i))**x) == \
(((x**(2*a/3))**(3*y/i))**x)
assert powdenest((x**(2*i)*y**(4*i))**z, force=True) == (x*y**2)**(2*i*z)
assert powdenest((p**(2*i)*q**(4*i))**j) == (p*q**2)**(2*i*j)
e = ((p**(2*a))**(3*y))**x
assert powdenest(e) == e
e = ((x**2*y**4)**a)**(x*y)
assert powdenest(e) == e
e = (((x**2*y**4)**a)**(x*y))**3
assert powdenest(e) == ((x**2*y**4)**a)**(3*x*y)
assert powdenest((((x**2*y**4)**a)**(x*y)), force=True) == \
(x*y**2)**(2*a*x*y)
assert powdenest((((x**2*y**4)**a)**(x*y))**3, force=True) == \
(x*y**2)**(6*a*x*y)
assert powdenest((x**2*y**6)**i) != (x*y**3)**(2*i)
x, y = symbols('x,y', positive=True)
assert powdenest((x**2*y**6)**i) == (x*y**3)**(2*i)
assert powdenest((x**(2*i/3)*y**(i/2))**(2*i)) == (x**(S(4)/3)*y)**(i**2)
assert powdenest(sqrt(x**(2*i)*y**(6*i))) == (x*y**3)**i
assert powdenest(4**x) == 2**(2*x)
assert powdenest((4**x)**y) == 2**(2*x*y)
assert powdenest(4**x*y) == 2**(2*x)*y
def test_powdenest_polar():
x, y, z = symbols('x y z', polar=True)
a, b, c = symbols('a b c')
assert powdenest((x*y*z)**a) == x**a*y**a*z**a
assert powdenest((x**a*y**b)**c) == x**(a*c)*y**(b*c)
assert powdenest(((x**a)**b*y**c)**c) == x**(a*b*c)*y**(c**2)
def test_issue_5805():
arg = ((gamma(x)*hyper((), (), x))*pi)**2
assert powdenest(arg) == (pi*gamma(x)*hyper((), (), x))**2
assert arg.is_positive is None
def test_issue_9324_powsimp_on_matrix_symbol():
M = MatrixSymbol('M', 10, 10)
expr = powsimp(M, deep=True)
assert expr == M
assert expr.args[0] == 'M'
def test_issue_6367():
z = -5*sqrt(2)/(2*sqrt(2*sqrt(29) + 29)) + sqrt(-sqrt(29)/29 + S(1)/2)
assert Mul(*[powsimp(a) for a in Mul.make_args(z.normal())]) == 0
assert powsimp(z.normal()) == 0
assert simplify(z) == 0
assert powsimp(sqrt(2 + sqrt(3))*sqrt(2 - sqrt(3)) + 1) == 2
assert powsimp(z) != 0
def test_powsimp_polar():
from sympy import polar_lift, exp_polar
x, y, z = symbols('x y z')
p, q, r = symbols('p q r', polar=True)
assert (polar_lift(-1))**(2*x) == exp_polar(2*pi*I*x)
assert powsimp(p**x * q**x) == (p*q)**x
assert p**x * (1/p)**x == 1
assert (1/p)**x == p**(-x)
assert exp_polar(x)*exp_polar(y) == exp_polar(x)*exp_polar(y)
assert powsimp(exp_polar(x)*exp_polar(y)) == exp_polar(x + y)
assert powsimp(exp_polar(x)*exp_polar(y)*p**x*p**y) == \
(p*exp_polar(1))**(x + y)
assert powsimp(exp_polar(x)*exp_polar(y)*p**x*p**y, combine='exp') == \
exp_polar(x + y)*p**(x + y)
assert powsimp(
exp_polar(x)*exp_polar(y)*exp_polar(2)*sin(x) + sin(y) + p**x*p**y) \
== p**(x + y) + sin(x)*exp_polar(2 + x + y) + sin(y)
assert powsimp(sin(exp_polar(x)*exp_polar(y))) == \
sin(exp_polar(x)*exp_polar(y))
assert powsimp(sin(exp_polar(x)*exp_polar(y)), deep=True) == \
sin(exp_polar(x + y))
def test_issue_5728():
b = x*sqrt(y)
a = sqrt(b)
c = sqrt(sqrt(x)*y)
assert powsimp(a*b) == sqrt(b)**3
assert powsimp(a*b**2*sqrt(y)) == sqrt(y)*a**5
assert powsimp(a*x**2*c**3*y) == c**3*a**5
assert powsimp(a*x*c**3*y**2) == c**7*a
assert powsimp(x*c**3*y**2) == c**7
assert powsimp(x*c**3*y) == x*y*c**3
assert powsimp(sqrt(x)*c**3*y) == c**5
assert powsimp(sqrt(x)*a**3*sqrt(y)) == sqrt(x)*sqrt(y)*a**3
assert powsimp(Mul(sqrt(x)*c**3*sqrt(y), y, evaluate=False)) == \
sqrt(x)*sqrt(y)**3*c**3
assert powsimp(a**2*a*x**2*y) == a**7
# symbolic powers work, too
b = x**y*y
a = b*sqrt(b)
assert a.is_Mul is True
assert powsimp(a) == sqrt(b)**3
# as does exp
a = x*exp(2*y/3)
assert powsimp(a*sqrt(a)) == sqrt(a)**3
assert powsimp(a**2*sqrt(a)) == sqrt(a)**5
assert powsimp(a**2*sqrt(sqrt(a))) == sqrt(sqrt(a))**9
def test_issue_from_PR1599():
n1, n2, n3, n4 = symbols('n1 n2 n3 n4', negative=True)
assert (powsimp(sqrt(n1)*sqrt(n2)*sqrt(n3)) ==
-I*sqrt(-n1)*sqrt(-n2)*sqrt(-n3))
assert (powsimp(root(n1, 3)*root(n2, 3)*root(n3, 3)*root(n4, 3)) ==
-(-1)**(S(1)/3)*
(-n1)**(S(1)/3)*(-n2)**(S(1)/3)*(-n3)**(S(1)/3)*(-n4)**(S(1)/3))
def test_issue_10195():
a = Symbol('a', integer=True)
l = Symbol('l', even=True, nonzero=True)
n = Symbol('n', odd=True)
e_x = (-1)**(n/2 - Rational(1, 2)) - (-1)**(3*n/2 - Rational(1, 2))
assert powsimp((-1)**(l/2)) == I**l
assert powsimp((-1)**(n/2)) == I**n
assert powsimp((-1)**(3*n/2)) == -I**n
assert powsimp(e_x) == (-1)**(n/2 - Rational(1, 2)) + (-1)**(3*n/2 +
Rational(1,2))
assert powsimp((-1)**(3*a/2)) == (-I)**a
def test_issue_11981():
x, y = symbols('x y', commutative=False)
assert powsimp((x*y)**2 * (y*x)**2) == (x*y)**2 * (y*x)**2
| bsd-3-clause |
pelodelfuego/word2vec-toolbox | toolbox/mlLib/conceptPairFeature.py | 1 | 4358 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import __init__
import numpy as np
from scipy.weave import inline
from sklearn.ensemble import RandomForestClassifier
import cpLib.concept as cp
import utils.skUtils as sku
# PROJECTION
def projCosSim(c1, c2):
v1 = c1.vect
v2 = c2.vect
dimCount = len(v1)
arr = np.zeros(dimCount, 'f')
code = """
for(int i = 0; i < dimCount; i++) {
float norm_v1 = 0.0;
float norm_v2 = 0.0;
float dot_pdt = 0.0;
for(int j = 0; j < dimCount; j++) {
if(i != j) {
dot_pdt += v1[j] * v2[j];
norm_v1 += v1[j] * v1[j];
norm_v2 += v2[j] * v2[j];
}
}
norm_v1 = sqrtf(norm_v1);
norm_v2 = sqrtf(norm_v2);
arr[i] = dot_pdt / norm_v1 / norm_v2;
}
return_val = 1;
"""
inline(code, ['v1', 'v2', 'dimCount', 'arr'], headers = ['<math.h>'], compiler = 'gcc')
return arr
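# Note (sketch, not part of the original toolbox): the leave-one-out cosine
# similarity computed by the weave kernel above has a plain numpy equivalent,
# handy where scipy.weave is unavailable:
#
#   dot = v1.dot(v2) - v1 * v2          # dot product excluding dimension i
#   n1 = np.sqrt(v1.dot(v1) - v1 * v1)  # norm of v1 excluding dimension i
#   n2 = np.sqrt(v2.dot(v2) - v2 * v2)  # norm of v2 excluding dimension i
#   arr = dot / (n1 * n2)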
def projEuclDist(c1, c2):
v1 = c1.vect
v2 = c2.vect
dimCount = len(v1)
arr = np.zeros(dimCount, 'f')
code = """
for(int i = 0; i < dimCount; i++) {
float dist = 0.0;
for(int j = 0; j < dimCount; j++) {
if(i != j) {
dist += pow(v1[j] - v2[j], 2);
}
}
arr[i] = sqrt(dist);
}
return_val = 1;
"""
inline(code, ['v1', 'v2', 'dimCount', 'arr'], headers = ['<math.h>'], compiler = 'gcc')
return arr
def projManaDist(c1, c2):
v1 = c1.vect
v2 = c2.vect
dimCount = len(v1)
arr = np.zeros(dimCount, 'f')
code = """
for(int i = 0; i < dimCount; i++) {
float dist = 0.0;
for(int j = 0; j < dimCount; j++) {
if(i != j) {
                dist += fabs(v1[j] - v2[j]);
}
}
arr[i] = dist;
}
return_val = 1;
"""
inline(code, ['v1', 'v2', 'dimCount', 'arr'], headers = ['<math.h>'], compiler = 'gcc')
return arr
# COMMUTATIVE FEATURE
def subCarth(conceptPair):
return conceptPair[2].vect - conceptPair[0].vect
def subPolar(conceptPair):
return conceptPair[2].polarVect() - conceptPair[0].polarVect()
def subAngular(conceptPair):
return conceptPair[2].angularVect() - conceptPair[0].angularVect()
def concatCarth(conceptPair):
return np.concatenate((conceptPair[0].vect, conceptPair[2].vect))
def concatPolar(conceptPair):
return np.concatenate((conceptPair[0].polarVect(), conceptPair[2].polarVect()))
def concatAngular(conceptPair):
return np.concatenate((conceptPair[0].angularVect(), conceptPair[2].angularVect()))
# NON COMMUATIVE FEATURE
# PROJECTION SIMILARITY
def pCosSim(conceptPair):
return projCosSim(conceptPair[0], conceptPair[2])
def pEuclDist(conceptPair):
return projEuclDist(conceptPair[0], conceptPair[2])
def pManaDist(conceptPair):
return projManaDist(conceptPair[0], conceptPair[2])
# PROJECTION DISSIMILARITY
def _projectionDissimarilty(projectionMetric, globalMetric, conceptPair):
projectedFeature = projectionMetric(conceptPair[0], conceptPair[2])
globalFeature = globalMetric(conceptPair[0], conceptPair[2])
return np.array([(globalFeature - v) for v in projectedFeature])
def pdCosSim(conceptPair):
return _projectionDissimarilty(projCosSim, cp.cosSim, conceptPair)
def pdEuclDist(conceptPair):
return _projectionDissimarilty(projEuclDist, cp.euclDist, conceptPair)
def pdManaDist(conceptPair):
return _projectionDissimarilty(projManaDist, cp.manaDist, conceptPair)
# CLF
class ConceptPairClf(object):
def __init__(self, clf, featureExtractionFct):
self.clf = clf
self.featureExtractionFct = featureExtractionFct
def fit(self, X, y):
self.clf.fit([self.featureExtractionFct(x) for x in X], y)
self.classes_ = self.clf.classes_
def predict(self, X):
return self.clf.predict([self.featureExtractionFct(x) for x in X])
def predict_proba(self, X):
return self.clf.predict_proba([self.featureExtractionFct(x) for x in X])
| gpl-3.0 |
ojengwa/oh-mainline | vendor/packages/Pygments/pygments/lexers/_luabuiltins.py | 275 | 6863 | # -*- coding: utf-8 -*-
"""
pygments.lexers._luabuiltins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This file contains the names and modules of lua functions
It is able to re-generate itself, but for adding new functions you
probably have to add some callbacks (see function module_callbacks).
Do not edit the MODULES dict by hand.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
MODULES = {'basic': ['_G',
'_VERSION',
'assert',
'collectgarbage',
'dofile',
'error',
'getfenv',
'getmetatable',
'ipairs',
'load',
'loadfile',
'loadstring',
'next',
'pairs',
'pcall',
'print',
'rawequal',
'rawget',
'rawset',
'select',
'setfenv',
'setmetatable',
'tonumber',
'tostring',
'type',
'unpack',
'xpcall'],
'coroutine': ['coroutine.create',
'coroutine.resume',
'coroutine.running',
'coroutine.status',
'coroutine.wrap',
'coroutine.yield'],
'debug': ['debug.debug',
'debug.getfenv',
'debug.gethook',
'debug.getinfo',
'debug.getlocal',
'debug.getmetatable',
'debug.getregistry',
'debug.getupvalue',
'debug.setfenv',
'debug.sethook',
'debug.setlocal',
'debug.setmetatable',
'debug.setupvalue',
'debug.traceback'],
'io': ['io.close',
'io.flush',
'io.input',
'io.lines',
'io.open',
'io.output',
'io.popen',
'io.read',
'io.tmpfile',
'io.type',
'io.write'],
'math': ['math.abs',
'math.acos',
'math.asin',
'math.atan2',
'math.atan',
'math.ceil',
'math.cosh',
'math.cos',
'math.deg',
'math.exp',
'math.floor',
'math.fmod',
'math.frexp',
'math.huge',
'math.ldexp',
'math.log10',
'math.log',
'math.max',
'math.min',
'math.modf',
'math.pi',
'math.pow',
'math.rad',
'math.random',
'math.randomseed',
'math.sinh',
'math.sin',
'math.sqrt',
'math.tanh',
'math.tan'],
'modules': ['module',
'require',
'package.cpath',
'package.loaded',
'package.loadlib',
'package.path',
'package.preload',
'package.seeall'],
'os': ['os.clock',
'os.date',
'os.difftime',
'os.execute',
'os.exit',
'os.getenv',
'os.remove',
'os.rename',
'os.setlocale',
'os.time',
'os.tmpname'],
'string': ['string.byte',
'string.char',
'string.dump',
'string.find',
'string.format',
'string.gmatch',
'string.gsub',
'string.len',
'string.lower',
'string.match',
'string.rep',
'string.reverse',
'string.sub',
'string.upper'],
'table': ['table.concat',
'table.insert',
'table.maxn',
'table.remove',
'table.sort']}
if __name__ == '__main__':
import re
import urllib
import pprint
# you can't generally find out what module a function belongs to if you
# have only its name. Because of this, here are some callback functions
    # that recognize if a given function belongs to a specific module.
def module_callbacks():
def is_in_coroutine_module(name):
return name.startswith('coroutine.')
def is_in_modules_module(name):
if name in ['require', 'module'] or name.startswith('package'):
return True
else:
return False
def is_in_string_module(name):
return name.startswith('string.')
def is_in_table_module(name):
return name.startswith('table.')
def is_in_math_module(name):
return name.startswith('math')
def is_in_io_module(name):
return name.startswith('io.')
def is_in_os_module(name):
return name.startswith('os.')
def is_in_debug_module(name):
return name.startswith('debug.')
return {'coroutine': is_in_coroutine_module,
'modules': is_in_modules_module,
'string': is_in_string_module,
'table': is_in_table_module,
'math': is_in_math_module,
'io': is_in_io_module,
'os': is_in_os_module,
'debug': is_in_debug_module}
def get_newest_version():
f = urllib.urlopen('http://www.lua.org/manual/')
r = re.compile(r'^<A HREF="(\d\.\d)/">Lua \1</A>')
for line in f:
m = r.match(line)
if m is not None:
return m.groups()[0]
def get_lua_functions(version):
f = urllib.urlopen('http://www.lua.org/manual/%s/' % version)
r = re.compile(r'^<A HREF="manual.html#pdf-(.+)">\1</A>')
functions = []
for line in f:
m = r.match(line)
if m is not None:
functions.append(m.groups()[0])
return functions
def get_function_module(name):
for mod, cb in module_callbacks().iteritems():
if cb(name):
return mod
if '.' in name:
return name.split('.')[0]
else:
return 'basic'
def regenerate(filename, modules):
f = open(filename)
try:
content = f.read()
finally:
f.close()
header = content[:content.find('MODULES = {')]
footer = content[content.find("if __name__ == '__main__':"):]
f = open(filename, 'w')
f.write(header)
f.write('MODULES = %s\n\n' % pprint.pformat(modules))
f.write(footer)
f.close()
def run():
version = get_newest_version()
print '> Downloading function index for Lua %s' % version
functions = get_lua_functions(version)
print '> %d functions found:' % len(functions)
modules = {}
for full_function_name in functions:
print '>> %s' % full_function_name
m = get_function_module(full_function_name)
modules.setdefault(m, []).append(full_function_name)
regenerate(__file__, modules)
run()
| agpl-3.0 |
Adel-Magebinary/odoo | openerp/addons/test_documentation_examples/tests/test_delegation.py | 366 | 1299 | # -*- coding: utf-8 -*-
from openerp.tests import common
class TestDelegation(common.TransactionCase):
def setUp(self):
super(TestDelegation, self).setUp()
env = self.env
record = env['delegation.parent'].create({
'child0_id': env['delegation.child0'].create({'field_0': 0}).id,
'child1_id': env['delegation.child1'].create({'field_1': 1}).id,
})
self.record = record
def test_delegating_record(self):
env = self.env
record = self.record
# children fields can be looked up on the parent record directly
self.assertEqual(
record.field_0
,
0
)
self.assertEqual(
record.field_1
,
1
)
def test_swap_child(self):
env = self.env
record = self.record
record.write({
'child0_id': env['delegation.child0'].create({'field_0': 42}).id
})
self.assertEqual(
record.field_0
,
42
)
def test_write(self):
record = self.record
record.write({'field_1': 4})
self.assertEqual(
record.field_1
,
4
)
self.assertEqual(
record.child1_id.field_1
,
4
)
| agpl-3.0 |
skodapetr/lbvs-environment | scripts/libs/core.py | 1 | 1664 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import csv
import os
import logging
import gzip
__license__ = "X11"
def init_logging():
logging.basicConfig(
level=logging.DEBUG,
format='%(asctime)s [%(levelname)s] - %(message)s',
datefmt='%H:%M:%S')
def create_directory(path):
if not os.path.exists(path) and not path == "":
os.makedirs(path)
def create_parent_directory(path):
parent_directory = os.path.dirname(path)
if not os.path.exists(parent_directory) and not parent_directory == "":
os.makedirs(parent_directory)
def read_json(path):
if path.endswith(".gz"):
with gzip.open(path, "rt") as stream:
return json.load(stream)
else:
with open(path, "r") as stream:
return json.load(stream)
def write_json(path, object_to_write):
create_parent_directory(path)
if path.endswith(".gz"):
with gzip.open(path, "wt") as stream:
json.dump(object_to_write, stream, indent=2)
else:
with open(path, "w") as stream:
json.dump(object_to_write, stream, indent=2)
def read_csv_as_object(path):
"""
Read CSV lines as objects.
"""
results = []
with open(path) as stream:
reader = csv.reader(stream, delimiter=",", quotechar='"')
header = next(reader)
for row in reader:
new_object = {}
for index in range(0, len(row)):
new_object[header[index]] = row[index]
results.append(new_object)
return results
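# Example of the returned shape (illustrative): a file containing
#   name,score
#   alice,3
# yields [{"name": "alice", "score": "3"}]; note that values remain strings.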
if __name__ == "__main__":
raise Exception("This module can be used only as a library!")
| mit |
willbarton/regulations-site | regulations/generator/layers/interpretations.py | 7 | 2479 | from django.http import HttpRequest
# Don't import PartialInterpView or utils directly; causes an import cycle
from regulations import generator, views
from regulations.generator.node_types import label_to_text
from regulations.generator.section_url import SectionUrl
class InterpretationsLayer(object):
"""Fetches the (rendered) interpretation for this node, if available"""
shorthand = 'interp'
def __init__(self, layer, version=None):
self.layer = layer
self.version = version
self.section_url = SectionUrl()
self.root_interp_label = None
self.partial_view = None
def preprocess_root(self, root):
"""The root label will allow us to use a single set of layer
appliers and grab all interp data at once."""
self.root_interp_label = '-'.join(root['label'] + ['Interp'])
view_class = views.partial_interp.PartialInterpView
self.partial_view = view_class.as_view(
inline=True, appliers=view_class.mk_appliers(
self.root_interp_label, self.version))
def apply_layer(self, text_index):
"""Return a pair of field-name + interpretation if one applies."""
if text_index in self.layer and self.layer[text_index]:
context = {'interps': [],
'for_markup_id': text_index,
'for_label': label_to_text(text_index.split('-'),
include_section=False)}
# Force caching of a few nodes up -- should prevent a request
# per interpretation if caching is on
generator.generator.get_tree_paragraph(
self.root_interp_label, self.version)
for layer_element in self.layer[text_index]:
reference = layer_element['reference']
request = HttpRequest()
request.method = 'GET'
response = self.partial_view(request, label_id=reference,
version=self.version)
response.render()
interp = {
'label_id': reference,
'markup': response.content,
}
ref_parts = reference.split('-')
interp['section_id'] = self.section_url.interp(
ref_parts, self.version)
context['interps'].append(interp)
return 'interp', context
| cc0-1.0 |
kaniblu/hangul-utils | hangul_utils/unicode.py | 1 | 8775 | __all__ = ["split_syllable_char", "split_syllables",
"join_jamos", "join_jamos_char",
"CHAR_INITIALS", "CHAR_MEDIALS", "CHAR_FINALS"]
import itertools
INITIAL = 0x001
MEDIAL = 0x010
FINAL = 0x100
CHAR_LISTS = {
INITIAL: list(map(chr, [
0x3131, 0x3132, 0x3134, 0x3137, 0x3138, 0x3139,
0x3141, 0x3142, 0x3143, 0x3145, 0x3146, 0x3147,
0x3148, 0x3149, 0x314a, 0x314b, 0x314c, 0x314d,
0x314e
])),
MEDIAL: list(map(chr, [
0x314f, 0x3150, 0x3151, 0x3152, 0x3153, 0x3154,
0x3155, 0x3156, 0x3157, 0x3158, 0x3159, 0x315a,
0x315b, 0x315c, 0x315d, 0x315e, 0x315f, 0x3160,
0x3161, 0x3162, 0x3163
])),
FINAL: list(map(chr, [
0x3131, 0x3132, 0x3133, 0x3134, 0x3135, 0x3136,
0x3137, 0x3139, 0x313a, 0x313b, 0x313c, 0x313d,
0x313e, 0x313f, 0x3140, 0x3141, 0x3142, 0x3144,
0x3145, 0x3146, 0x3147, 0x3148, 0x314a, 0x314b,
0x314c, 0x314d, 0x314e
]))
}
CHAR_INITIALS = CHAR_LISTS[INITIAL]
CHAR_MEDIALS = CHAR_LISTS[MEDIAL]
CHAR_FINALS = CHAR_LISTS[FINAL]
CHAR_SETS = {k: set(v) for k, v in CHAR_LISTS.items()}
CHARSET = set(itertools.chain(*CHAR_SETS.values()))
CHAR_INDICES = {k: {c: i for i, c in enumerate(v)}
for k, v in CHAR_LISTS.items()}
def is_hangul_syllable(c):
return 0xac00 <= ord(c) <= 0xd7a3 # Hangul Syllables
def is_hangul_jamo(c):
return 0x1100 <= ord(c) <= 0x11ff # Hangul Jamo
def is_hangul_compat_jamo(c):
return 0x3130 <= ord(c) <= 0x318f # Hangul Compatibility Jamo
def is_hangul_jamo_exta(c):
return 0xa960 <= ord(c) <= 0xa97f # Hangul Jamo Extended-A
def is_hangul_jamo_extb(c):
return 0xd7b0 <= ord(c) <= 0xd7ff # Hangul Jamo Extended-B
def is_hangul(c):
return (is_hangul_syllable(c) or
is_hangul_jamo(c) or
is_hangul_compat_jamo(c) or
is_hangul_jamo_exta(c) or
is_hangul_jamo_extb(c))
def is_supported_hangul(c):
return is_hangul_syllable(c) or is_hangul_compat_jamo(c)
def check_hangul(c, jamo_only=False):
if not ((jamo_only or is_hangul_compat_jamo(c)) or is_supported_hangul(c)):
raise ValueError(f"'{c}' is not a supported hangul character. "
f"'Hangul Syllables' (0xac00 ~ 0xd7a3) and "
f"'Hangul Compatibility Jamos' (0x3130 ~ 0x318f) are "
f"supported at the moment.")
def get_jamo_type(c):
check_hangul(c)
assert is_hangul_compat_jamo(c), f"not a jamo: {ord(c):x}"
return sum(t for t, s in CHAR_SETS.items() if c in s)
def split_syllable_char(c):
"""
Splits a given korean syllable into its components. Each component is
represented by Unicode in 'Hangul Compatibility Jamo' range.
Arguments:
c: A Korean character.
Returns:
A triple (initial, medial, final) of Hangul Compatibility Jamos.
If no jamo corresponds to a position, `None` is returned there.
Example:
>>> split_syllable_char("안")
("ㅇ", "ㅏ", "ㄴ")
>>> split_syllable_char("고")
("ㄱ", "ㅗ", None)
>>> split_syllable_char("ㅗ")
(None, "ㅗ", None)
>>> split_syllable_char("ㅇ")
("ㅇ", None, None)
"""
check_hangul(c)
if len(c) != 1:
raise ValueError("Input string must have exactly one character.")
init, med, final = None, None, None
if is_hangul_syllable(c):
offset = ord(c) - 0xac00
x = (offset - offset % 28) // 28
init, med, final = x // 21, x % 21, offset % 28
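        # Worked example: for "안" (U+C548), offset = 6472, so
        # final = 6472 % 28 = 4, x = 6468 // 28 = 231,
        # init = 231 // 21 = 11 (ㅇ) and med = 231 % 21 = 0 (ㅏ);
        # the final index is shifted down by one below, giving 3 (ㄴ).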
if not final:
final = None
else:
final -= 1
else:
pos = get_jamo_type(c)
if pos & INITIAL == INITIAL:
pos = INITIAL
elif pos & MEDIAL == MEDIAL:
pos = MEDIAL
elif pos & FINAL == FINAL:
pos = FINAL
idx = CHAR_INDICES[pos][c]
if pos == INITIAL:
init = idx
elif pos == MEDIAL:
med = idx
else:
final = idx
return tuple(CHAR_LISTS[pos][idx] if idx is not None else None
for pos, idx in
zip([INITIAL, MEDIAL, FINAL], [init, med, final]))
def split_syllables(s, ignore_err=True, pad=None):
"""
Performs syllable-split on a string.
Arguments:
s (str): A string (possibly mixed with non-Hangul characters).
ignore_err (bool): If set False, it ensures that all characters in
the string are Hangul-splittable and throws a ValueError otherwise.
(default: True)
pad (str): Pad empty jamo positions (initial, medial, or final) with
`pad` character. This is useful for cases where fixed-length
strings are needed. (default: None)
Returns:
Hangul-split string
Example:
>>> split_syllables("안녕하세요")
"ㅇㅏㄴㄴㅕㅇㅎㅏㅅㅔㅇㅛ"
>>> split_syllables("안녕하세요~~", ignore_err=False)
ValueError: encountered an unsupported character: ~ (0x7e)
>>> split_syllables("안녕하세요ㅛ", pad="x")
'ㅇㅏㄴㄴㅕㅇㅎㅏxㅅㅔxㅇㅛxxㅛx'
"""
def try_split(c):
try:
return split_syllable_char(c)
except ValueError:
if ignore_err:
return (c,)
raise ValueError(f"encountered an unsupported character: "
f"{c} (0x{ord(c):x})")
s = map(try_split, s)
if pad is not None:
tuples = map(lambda x: tuple(pad if y is None else y for y in x), s)
else:
tuples = map(lambda x: filter(None, x), s)
return "".join(itertools.chain(*tuples))
def join_jamos_char(init, med, final=None):
"""
Combines jamos into a single syllable.
Arguments:
        init (str): Initial jamo.
med (str): Medial jamo.
final (str): Final jamo. If not supplied, the final syllable is made
without the final. (default: None)
Returns:
A Korean syllable.
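    Example:
        >>> join_jamos_char("ㅇ", "ㅏ", "ㄴ")
        "안"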
"""
chars = (init, med, final)
for c in filter(None, chars):
check_hangul(c, jamo_only=True)
idx = tuple(CHAR_INDICES[pos][c] if c is not None else c
for pos, c in zip((INITIAL, MEDIAL, FINAL), chars))
init_idx, med_idx, final_idx = idx
# final index must be shifted once as
# final index with 0 points to syllables without final
final_idx = 0 if final_idx is None else final_idx + 1
return chr(0xac00 + 28 * 21 * init_idx + 28 * med_idx + final_idx)
def join_jamos(s, ignore_err=True):
"""
Combines a sequence of jamos to produce a sequence of syllables.
Arguments:
s (str): A string (possible mixed with non-jamo characters).
ignore_err (bool): If set False, it will ensure that all characters
will be consumed for the making of syllables. It will throw a
ValueError when it fails to do so. (default: True)
Returns:
A string
Example:
>>> join_jamos("ㅇㅏㄴㄴㅕㅇㅎㅏㅅㅔㅇㅛ")
"안녕하세요"
>>> join_jamos("ㅇㅏㄴㄴㄴㅕㅇㅎㅏㅅㅔㅇㅛ")
"안ㄴ녕하세요"
        >>> join_jamos("ㅇㅏ")
        "아"
"""
last_t = 0
queue = []
new_string = ""
def flush(n=0):
new_queue = []
while len(queue) > n:
new_queue.append(queue.pop())
if len(new_queue) == 1:
if not ignore_err:
raise ValueError(f"invalid jamo character: {new_queue[0]}")
result = new_queue[0]
elif len(new_queue) >= 2:
try:
result = join_jamos_char(*new_queue)
except (ValueError, KeyError):
# Invalid jamo combination
if not ignore_err:
raise ValueError(f"invalid jamo characters: {new_queue}")
result = "".join(new_queue)
else:
result = None
return result
for c in s:
if c not in CHARSET:
if queue:
new_c = flush() + c
else:
new_c = c
last_t = 0
else:
t = get_jamo_type(c)
new_c = None
if t & FINAL == FINAL:
if not (last_t == MEDIAL):
new_c = flush()
elif t == INITIAL:
new_c = flush()
elif t == MEDIAL:
if last_t & INITIAL == INITIAL:
new_c = flush(1)
else:
new_c = flush()
last_t = t
queue.insert(0, c)
if new_c:
new_string += new_c
if queue:
new_string += flush()
return new_string
| gpl-3.0 |
leorochael/odoo | addons/account_analytic_analysis/res_config.py | 426 | 1408 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2012 OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class sale_configuration(osv.osv_memory):
_inherit = 'sale.config.settings'
_columns = {
'group_template_required': fields.boolean("Mandatory use of templates.",
implied_group='account_analytic_analysis.group_template_required',
help="Allows you to set the template field as required when creating an analytic account or a contract."),
}
| agpl-3.0 |
maftieu/CouchPotatoServer | libs/git/__init__.py | 121 | 1673 | # Copyright (c) 2009, Rotem Yaari <[email protected]>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of organization nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY Rotem Yaari ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Rotem Yaari BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from repository import RemoteRepository
from repository import LocalRepository
from repository import clone
from repository import find_repository
| gpl-3.0 |
h3llrais3r/Auto-Subliminal | lib/ws4py/websocket.py | 4 | 17891 | # -*- coding: utf-8 -*-
import logging
import socket
import ssl
import time
import threading
import types
import errno
try:
from OpenSSL.SSL import Error as pyOpenSSLError
except ImportError:
class pyOpenSSLError(Exception):
pass
from ws4py import WS_KEY, WS_VERSION
from ws4py.exc import HandshakeError, StreamClosed
from ws4py.streaming import Stream
from ws4py.messaging import Message, PingControlMessage,\
PongControlMessage
from ws4py.compat import basestring, unicode
DEFAULT_READING_SIZE = 2
logger = logging.getLogger('ws4py')
__all__ = ['WebSocket', 'EchoWebSocket', 'Heartbeat']
class Heartbeat(threading.Thread):
def __init__(self, websocket, frequency=2.0):
"""
Runs at a periodic interval specified by
        `frequency` by sending an unsolicited pong
message to the connected peer.
If the message fails to be sent and a socket
error is raised, we close the websocket
socket automatically, triggering the `closed`
handler.
"""
threading.Thread.__init__(self)
self.websocket = websocket
self.frequency = frequency
def __enter__(self):
if self.frequency:
self.start()
return self
def __exit__(self, exc_type, exc_value, exc_tb):
self.stop()
def stop(self):
self.running = False
def run(self):
self.running = True
while self.running:
time.sleep(self.frequency)
if self.websocket.terminated:
break
try:
self.websocket.send(PongControlMessage(data='beep'))
except socket.error:
logger.info("Heartbeat failed")
self.websocket.server_terminated = True
self.websocket.close_connection()
break
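# Usage sketch (illustrative, not part of the public API): WebSocket.run()
# below drives its read loop inside this context manager, so the heartbeat
# thread lives exactly as long as the connection:
#
#   with Heartbeat(websocket, frequency=2.0):
#       while not websocket.terminated:
#           if not websocket.once():
#               break
#
# A falsy frequency (0 or None) keeps the thread from ever starting.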
class WebSocket(object):
""" Represents a websocket endpoint and provides a high level interface to drive the endpoint. """
def __init__(self, sock, protocols=None, extensions=None, environ=None, heartbeat_freq=None):
""" The ``sock`` is an opened connection
resulting from the websocket handshake.
If ``protocols`` is provided, it is a list of protocols
negotiated during the handshake as is ``extensions``.
If ``environ`` is provided, it is a copy of the WSGI environ
        dictionary from the underlying WSGI server.
"""
self.stream = Stream(always_mask=False)
"""
Underlying websocket stream that performs the websocket
parsing to high level objects. By default this stream
never masks its messages. Clients using this class should
set the ``stream.always_mask`` fields to ``True``
and ``stream.expect_masking`` fields to ``False``.
"""
self.protocols = protocols
"""
List of protocols supported by this endpoint.
Unused for now.
"""
self.extensions = extensions
"""
List of extensions supported by this endpoint.
Unused for now.
"""
self.sock = sock
"""
Underlying connection.
"""
self._is_secure = hasattr(sock, '_ssl') or hasattr(sock, '_sslobj')
"""
Tell us if the socket is secure or not.
"""
self.client_terminated = False
"""
Indicates if the client has been marked as terminated.
"""
self.server_terminated = False
"""
Indicates if the server has been marked as terminated.
"""
self.reading_buffer_size = DEFAULT_READING_SIZE
"""
Current connection reading buffer size.
"""
self.environ = environ
"""
WSGI environ dictionary.
"""
self.heartbeat_freq = heartbeat_freq
"""
At which interval the heartbeat will be running.
Set this to `0` or `None` to disable it entirely.
"""
"Internal buffer to get around SSL problems"
self.buf = b''
self._local_address = None
self._peer_address = None
@property
def local_address(self):
"""
Local endpoint address as a tuple
"""
if not self._local_address:
self._local_address = self.sock.getsockname()
if len(self._local_address) == 4:
self._local_address = self._local_address[:2]
return self._local_address
@property
def peer_address(self):
"""
Peer endpoint address as a tuple
"""
if not self._peer_address:
self._peer_address = self.sock.getpeername()
if len(self._peer_address) == 4:
self._peer_address = self._peer_address[:2]
return self._peer_address
def opened(self):
"""
Called by the server when the upgrade handshake
has succeeded.
"""
pass
def close(self, code=1000, reason=''):
"""
Call this method to initiate the websocket connection
closing by sending a close frame to the connected peer.
The ``code`` is the status code representing the
termination's reason.
Once this method is called, the ``server_terminated``
attribute is set. Calling this method several times is
safe as the closing frame will be sent only the first
time.
.. seealso:: Defined Status Codes http://tools.ietf.org/html/rfc6455#section-7.4.1
"""
if not self.server_terminated:
self.server_terminated = True
try:
self._write(self.stream.close(code=code, reason=reason).single(mask=self.stream.always_mask))
except Exception as ex:
logger.error("Error when terminating the connection: %s", str(ex))
def closed(self, code, reason=None):
"""
Called when the websocket stream and connection are finally closed.
The provided ``code`` is status set by the other point and
``reason`` is a human readable message.
.. seealso:: Defined Status Codes http://tools.ietf.org/html/rfc6455#section-7.4.1
"""
pass
@property
def terminated(self):
"""
Returns ``True`` if both the client and server have been
marked as terminated.
"""
return self.client_terminated is True and self.server_terminated is True
@property
def connection(self):
return self.sock
def close_connection(self):
"""
Shutdowns then closes the underlying connection.
"""
if self.sock:
try:
self.sock.shutdown(socket.SHUT_RDWR)
self.sock.close()
except:
pass
finally:
self.sock = None
def ping(self, message):
"""
Send a ping message to the remote peer.
The given `message` must be a unicode string.
"""
self.send(PingControlMessage(message))
def ponged(self, pong):
"""
Pong message, as a :class:`messaging.PongControlMessage` instance,
received on the stream.
"""
pass
def received_message(self, message):
"""
Called whenever a complete ``message``, binary or text,
is received and ready for application's processing.
The passed message is an instance of :class:`messaging.TextMessage`
or :class:`messaging.BinaryMessage`.
.. note:: You should override this method in your subclass.
"""
pass
def unhandled_error(self, error):
"""
Called whenever a socket, or an OS, error is trapped
by ws4py but not managed by it. The given error is
an instance of `socket.error` or `OSError`.
Note however that application exceptions will not go
through this handler. Instead, do make sure you
protect your code appropriately in `received_message`
or `send`.
The default behaviour of this handler is to log
the error with a message.
"""
logger.exception("Failed to receive data")
def _write(self, b):
"""
Trying to prevent a write operation
on an already closed websocket stream.
        This cannot be bulletproof but hopefully
        will catch almost all use cases.
"""
if self.terminated or self.sock is None:
raise RuntimeError("Cannot send on a terminated websocket")
self.sock.sendall(b)
def send(self, payload, binary=False):
"""
Sends the given ``payload`` out.
If ``payload`` is some bytes or a bytearray,
then it is sent as a single message not fragmented.
If ``payload`` is a generator, each chunk is sent as part of
fragmented message.
If ``binary`` is set, handles the payload as a binary message.
"""
message_sender = self.stream.binary_message if binary else self.stream.text_message
if isinstance(payload, basestring) or isinstance(payload, bytearray):
m = message_sender(payload).single(mask=self.stream.always_mask)
self._write(m)
elif isinstance(payload, Message):
data = payload.single(mask=self.stream.always_mask)
self._write(data)
elif type(payload) == types.GeneratorType:
bytes = next(payload)
first = True
for chunk in payload:
self._write(message_sender(bytes).fragment(first=first, mask=self.stream.always_mask))
bytes = chunk
first = False
self._write(message_sender(bytes).fragment(first=first, last=True, mask=self.stream.always_mask))
else:
raise ValueError("Unsupported type '%s' passed to send()" % type(payload))
def _get_from_pending(self):
"""
The SSL socket object provides the same interface
as the socket interface but behaves differently.
        When data is sent over an SSL connection,
        more data may be read than was requested by
        the ws4py websocket object.
In that case, the data may have been indeed read
from the underlying real socket, but not read by the
application which will expect another trigger from the
manager's polling mechanism as if more data was still on the
wire. This will happen only when new data is
sent by the other peer which means there will be
some delay before the initial read data is handled
by the application.
Due to this, we have to rely on a non-public method
to query the internal SSL socket buffer if it has indeed
more data pending in its buffer.
Now, some people in the Python community
`discourage <https://bugs.python.org/issue21430>`_
this usage of the ``pending()`` method because it's not
the right way of dealing with such use case. They advise
`this approach <https://docs.python.org/dev/library/ssl.html#notes-on-non-blocking-sockets>`_
instead. Unfortunately, this applies only if the
application can directly control the poller which is not
the case with the WebSocket abstraction here.
        We therefore rely on this `technique <http://stackoverflow.com/questions/3187565/select-and-ssl-in-python>`_
        which seems to be valid anyway.
        This is a bit of a shame because we have to process
        more data than we initially wanted.
"""
data = b""
pending = self.sock.pending()
while pending:
data += self.sock.recv(pending)
pending = self.sock.pending()
return data
def once(self):
"""
Performs the operation of reading from the underlying
connection in order to feed the stream of bytes.
Because this needs to support SSL sockets, we must always
read as much as might be in the socket at any given time,
        however process() expects to be called with only a certain
number of bytes at a time. That number is found in
self.reading_buffer_size, so we read everything into our own buffer,
and then from there feed self.process.
Then the stream indicates
whatever size must be read from the connection since
it knows the frame payload length.
It returns `False` if an error occurred at the
socket level or during the bytes processing. Otherwise,
it returns `True`.
"""
if self.terminated:
logger.debug("WebSocket is already terminated")
return False
try:
b = b''
if self._is_secure:
b = self._get_from_pending()
if not b and not self.buf:
b = self.sock.recv(self.reading_buffer_size)
if not b and not self.buf:
return False
self.buf += b
except (socket.error, OSError, pyOpenSSLError) as e:
if hasattr(e, "errno") and e.errno == errno.EINTR:
pass
else:
self.unhandled_error(e)
return False
else:
# process as much as we can
# the process will stop either if there is no buffer left
# or if the stream is closed
# only pass the requested number of bytes, leave the rest in the buffer
requested = self.reading_buffer_size
if not self.process(self.buf[:requested]):
return False
self.buf = self.buf[requested:]
return True
def terminate(self):
"""
Completes the websocket by calling the `closed`
method either using the received closing code
and reason, or when none was received, using
the special `1006` code.
Finally close the underlying connection for
good and cleanup resources by unsetting
the `environ` and `stream` attributes.
"""
s = self.stream
try:
if s.closing is None:
self.closed(1006, "Going away")
else:
self.closed(s.closing.code, s.closing.reason)
finally:
self.client_terminated = self.server_terminated = True
self.close_connection()
# Cleaning up resources
s._cleanup()
self.stream = None
self.environ = None
def process(self, bytes):
""" Takes some bytes and process them through the
internal stream's parser. If a message of any kind is
found, performs one of these actions:
* A closing message will initiate the closing handshake
* Errors will initiate a closing handshake
* A message will be passed to the ``received_message`` method
* Pings will see pongs be sent automatically
* Pongs will be passed to the ``ponged`` method
The process should be terminated when this method
returns ``False``.
"""
s = self.stream
if not bytes and self.reading_buffer_size > 0:
return False
self.reading_buffer_size = s.parser.send(bytes) or DEFAULT_READING_SIZE
if s.closing is not None:
logger.debug("Closing message received (%d) '%s'" % (s.closing.code, s.closing.reason))
if not self.server_terminated:
self.close(s.closing.code, s.closing.reason)
else:
self.client_terminated = True
return False
if s.errors:
for error in s.errors:
logger.debug("Error message received (%d) '%s'" % (error.code, error.reason))
self.close(error.code, error.reason)
s.errors = []
return False
if s.has_message:
self.received_message(s.message)
if s.message is not None:
s.message.data = None
s.message = None
return True
if s.pings:
for ping in s.pings:
self._write(s.pong(ping.data))
s.pings = []
if s.pongs:
for pong in s.pongs:
self.ponged(pong)
s.pongs = []
return True
def run(self):
"""
Performs the operation of reading from the underlying
connection in order to feed the stream of bytes.
We start with a small size of two bytes to be read
from the connection so that we can quickly parse an
incoming frame header. Then the stream indicates
whatever size must be read from the connection since
it knows the frame payload length.
        Note that we perform some automatic operations:
* On a closing message, we respond with a closing
message and finally close the connection
* We respond to pings with pong messages.
* Whenever an error is raised by the stream parsing,
we initiate the closing of the connection with the
          appropriate error code.
This method is blocking and should likely be run
in a thread.
"""
self.sock.setblocking(True)
with Heartbeat(self, frequency=self.heartbeat_freq):
s = self.stream
try:
self.opened()
while not self.terminated:
if not self.once():
break
finally:
self.terminate()
class EchoWebSocket(WebSocket):
def received_message(self, message):
"""
Automatically sends back the provided ``message`` to
its originating endpoint.
"""
self.send(message.data, message.is_binary)
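# A minimal subclass sketch (illustrative; not shipped with ws4py). Handlers
# normally only need to override received_message(), as EchoWebSocket does above:
#
#   class ShoutingWebSocket(WebSocket):
#       def received_message(self, message):
#           # echo the payload back upper-cased, mirroring EchoWebSocket
#           self.send(message.data.upper(), message.is_binary)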
| gpl-3.0 |
crosby823/pgd | sites/all/modules/fckeditor/fckeditor/_samples/py/sample01.py | 33 | 2083 | #!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2010 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Sample page.
"""
import cgi
import os
# Ensure that the fckeditor.py is included in your classpath
import fckeditor
# Tell the browser to render html
print "Content-Type: text/html"
print ""
# Document header
print """<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<html>
<head>
<title>FCKeditor - Sample</title>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<meta name="robots" content="noindex, nofollow">
<link href="../sample.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h1>FCKeditor - Python - Sample 1</h1>
This sample displays a normal HTML form with an FCKeditor with full features
enabled.
<hr>
<form action="sampleposteddata.py" method="post" target="_blank">
"""
# This is the real work
try:
sBasePath = os.environ.get("SCRIPT_NAME")
sBasePath = sBasePath[0:sBasePath.find("_samples")]
oFCKeditor = fckeditor.FCKeditor('FCKeditor1')
oFCKeditor.BasePath = sBasePath
oFCKeditor.Value = """<p>This is some <strong>sample text</strong>. You are using <a href="http://www.fckeditor.net/">FCKeditor</a>.</p>"""
print oFCKeditor.Create()
except Exception, e:
print e
print """
<br>
<input type="submit" value="Submit">
</form>
"""
# For testing your environments
#print "<hr>"
#for key in os.environ.keys():
# print "%s: %s<br>" % (key, os.environ.get(key, ""))
#print "<hr>"
# Document footer
print """
</body>
</html>
"""
| gpl-2.0 |
YACOWS/opps | tests/core/permissions/test_models.py | 4 | 2200 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.test import TestCase
from django.contrib.sites.models import Site
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group
from opps.channels.models import Channel
from opps.core.permissions.models import Permission, PermissionGroup
User = get_user_model()
class PermissionModelTest(TestCase):
def test_create(self):
user = User.objects.create(username='user')
instance = Permission.objects.create(user=user)
self.assertTrue(instance)
def test_empty_get_by_user(self):
user = User.objects.create(username='another')
result = Permission.get_by_user(user)
self.assertEqual(len(result['sites_id']), 0)
self.assertEqual(len(result['all_sites_id']), 0)
self.assertEqual(len(result['channels_id']), 0)
self.assertEqual(len(result['channels_sites_id']), 0)
def test_get_by_user_with_user_permission(self):
user = User.objects.create(username='john_doe')
site = Site.objects.all()[0]
channel = Channel.objects.create(
name='Home',
slug='home',
site=site,
user=user
)
permission = Permission.objects.create(user=user)
permission.channel.add(channel)
permission.save()
result = Permission.get_by_user(user)
self.assertTrue(site.pk in result['all_sites_id'])
self.assertTrue(channel.pk in result['channels_id'])
def test_get_by_user_with_group_permission(self):
group = Group.objects.create(name='programmers')
user = User.objects.create(username='john_doe')
user.groups.add(group)
site = Site.objects.all()[0]
channel = Channel.objects.create(
name='Home',
slug='home',
site=site,
user=user
)
permission = PermissionGroup.objects.create(group=group)
permission.channel.add(channel)
permission.save()
result = Permission.get_by_user(user)
self.assertTrue(site.pk in result['all_sites_id'])
self.assertTrue(channel.pk in result['channels_id'])
| mit |
Distrotech/bzr | bzrlib/tests/test_cache_utf8.py | 2 | 4352 | # Copyright (C) 2006 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""Tests for utf8 caching."""
from bzrlib import (
cache_utf8,
)
from bzrlib.tests import TestCase
class TestEncodeCache(TestCase):
def setUp(self):
super(TestEncodeCache, self).setUp()
cache_utf8.clear_encoding_cache()
self.addCleanup(cache_utf8.clear_encoding_cache)
def check_encode(self, rev_id):
rev_id_utf8 = rev_id.encode('utf-8')
self.assertFalse(rev_id in cache_utf8._unicode_to_utf8_map)
self.assertFalse(rev_id_utf8 in cache_utf8._utf8_to_unicode_map)
# After a single encode, the mapping should exist for
# both directions
self.assertEqual(rev_id_utf8, cache_utf8.encode(rev_id))
self.assertTrue(rev_id in cache_utf8._unicode_to_utf8_map)
self.assertTrue(rev_id_utf8 in cache_utf8._utf8_to_unicode_map)
self.assertEqual(rev_id, cache_utf8.decode(rev_id_utf8))
cache_utf8.clear_encoding_cache()
self.assertFalse(rev_id in cache_utf8._unicode_to_utf8_map)
self.assertFalse(rev_id_utf8 in cache_utf8._utf8_to_unicode_map)
def check_decode(self, rev_id):
rev_id_utf8 = rev_id.encode('utf-8')
self.assertFalse(rev_id in cache_utf8._unicode_to_utf8_map)
self.assertFalse(rev_id_utf8 in cache_utf8._utf8_to_unicode_map)
# After a single decode, the mapping should exist for
# both directions
self.assertEqual(rev_id, cache_utf8.decode(rev_id_utf8))
self.assertTrue(rev_id in cache_utf8._unicode_to_utf8_map)
self.assertTrue(rev_id_utf8 in cache_utf8._utf8_to_unicode_map)
self.assertEqual(rev_id_utf8, cache_utf8.encode(rev_id))
cache_utf8.clear_encoding_cache()
self.assertFalse(rev_id in cache_utf8._unicode_to_utf8_map)
self.assertFalse(rev_id_utf8 in cache_utf8._utf8_to_unicode_map)
def test_ascii(self):
self.check_decode(u'all_ascii_characters123123123')
self.check_encode(u'all_ascii_characters123123123')
def test_unicode(self):
self.check_encode(u'some_\xb5_unicode_\xe5_chars')
self.check_decode(u'some_\xb5_unicode_\xe5_chars')
def test_cached_unicode(self):
x = u'\xb5yy' + u'\xe5zz'
y = u'\xb5yy' + u'\xe5zz'
self.assertFalse(x is y)
xp = cache_utf8.get_cached_unicode(x)
yp = cache_utf8.get_cached_unicode(y)
self.assertIs(xp, x)
self.assertIs(xp, yp)
def test_cached_utf8(self):
x = u'\xb5yy\xe5zz'.encode('utf8')
y = u'\xb5yy\xe5zz'.encode('utf8')
self.assertFalse(x is y)
xp = cache_utf8.get_cached_utf8(x)
yp = cache_utf8.get_cached_utf8(y)
self.assertIs(xp, x)
self.assertIs(xp, yp)
def test_cached_ascii(self):
x = '%s %s' % ('simple', 'text')
y = '%s %s' % ('simple', 'text')
self.assertFalse(x is y)
xp = cache_utf8.get_cached_ascii(x)
yp = cache_utf8.get_cached_ascii(y)
self.assertIs(xp, x)
self.assertIs(xp, yp)
# after caching, encode and decode should also return the right
# objects.
uni_x = cache_utf8.decode(x)
self.assertEqual(u'simple text', uni_x)
self.assertIsInstance(uni_x, unicode)
utf8_x = cache_utf8.encode(uni_x)
self.assertIs(utf8_x, x)
def test_decode_with_None(self):
self.assertEqual(None, cache_utf8._utf8_decode_with_None(None))
self.assertEqual(u'foo', cache_utf8._utf8_decode_with_None('foo'))
self.assertEqual(u'f\xb5',
cache_utf8._utf8_decode_with_None('f\xc2\xb5'))
| gpl-2.0 |
jss-emr/openerp-7-src | openerp/addons/base_report_designer/plugin/openerp_report_designer/bin/script/lib/__init__.py | 392 | 1180 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import actions
import error
import functions
import gui
import logreport
import rpc
import tiny_socket
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
eirslett/microservices-infrastructure | docs/conf.py | 25 | 9388 | # -*- coding: utf-8 -*-
#
# Microservices Infrastructure documentation build configuration file, created by
# sphinx-quickstart on Wed Feb 4 06:59:14 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
'sphinx.ext.todo',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Microservices Infrastructure'
copyright = u'2015, Cisco Systems, Incorporated'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.3'
# The full version, including alpha/beta/rc tags.
release = '0.3'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
import alabaster
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
extensions += ['alabaster']
html_theme_options = {
'github_user': 'ciscocloud',
'github_repo': 'microservices-infrastructure',
'logo': 'cisco.png',
'logo_name': True,
}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [alabaster.get_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'**': [
'about.html', 'navigation.html', 'searchbox.html'
]
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = True
# If false, no index is generated.
html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'MicroservicesInfrastructuredoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'MicroservicesInfrastructure.tex', u'Microservices Infrastructure Documentation',
u'Cisco Systems, Incorporated', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
latex_show_urls = 'footnote'
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'microservicesinfrastructure', u'Microservices Infrastructure Documentation',
[u'Cisco Systems, Incorporated'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'MicroservicesInfrastructure', u'Microservices Infrastructure Documentation',
u'Cisco Systems, Incorporated', 'MicroservicesInfrastructure', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'python': ('http://docs.python.org/', None),
'ansible': ('http://docs.ansible.com/', None),
}
# -- Options for todo ext ------------------------------------------------
todo_include_todos = os.getenv('INCLUDE_TODOS', '0') == '1' or version != release
# -- setup ---------------------------------------------------------------
def setup(app):
from sphinx.util.texescape import tex_replacements
tex_replacements.extend([
(u'☐', u'[ ]'),
(u'☑', u'[x]'),
])
| apache-2.0 |
gskachkov/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/w3c/test_parser.py | 135 | 6756 | #!/usr/bin/env python
# Copyright (C) 2013 Adobe Systems Incorporated. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
# THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
import logging
import re
from webkitpy.common.host import Host
from webkitpy.thirdparty.BeautifulSoup import BeautifulSoup as Parser
_log = logging.getLogger(__name__)
class TestParser(object):
def __init__(self, options, filename):
self.options = options
self.filename = filename
self.host = Host()
self.filesystem = self.host.filesystem
self.test_doc = None
self.ref_doc = None
self.load_file(filename)
def load_file(self, filename):
if self.filesystem.isfile(filename):
try:
self.test_doc = Parser(self.filesystem.read_binary_file(filename))
except:
# FIXME: Figure out what to do if we can't parse the file.
_log.error("Failed to parse %s", filename)
                self.test_doc = None
else:
if self.filesystem.isdir(filename):
# FIXME: Figure out what is triggering this and what to do about it.
_log.error("Trying to load %s, which is a directory", filename)
self.test_doc = None
self.ref_doc = None
def analyze_test(self, test_contents=None, ref_contents=None):
""" Analyzes a file to determine if it's a test, what type of test, and what reference or support files it requires. Returns all of the test info """
test_info = None
if test_contents is None and self.test_doc is None:
return test_info
if test_contents is not None:
self.test_doc = Parser(test_contents)
if ref_contents is not None:
self.ref_doc = Parser(ref_contents)
# First check if it's a reftest
matches = self.reference_links_of_type('match') + self.reference_links_of_type('mismatch')
if matches:
if len(matches) > 1:
# FIXME: Is this actually true? We should fix this.
_log.warning('Multiple references are not supported. Importing the first ref defined in %s',
self.filesystem.basename(self.filename))
try:
ref_file = self.filesystem.join(self.filesystem.dirname(self.filename), matches[0]['href'])
except KeyError as e:
# FIXME: Figure out what to do w/ invalid test files.
                _log.error('%s has a reference link but is missing the "href"', self.filename)
return None
if self.ref_doc is None:
self.ref_doc = self.load_file(ref_file)
test_info = {'test': self.filename, 'reference': ref_file}
# If the ref file path is relative, we need to check it for
# relative paths also because when it lands in WebKit, it will be
# moved down into the test dir.
#
# Note: The test files themselves are not checked for support files
# outside their directories as the convention in the CSSWG is to
# put all support files in the same dir or subdir as the test.
#
# All non-test files in the test's directory tree are normally
# copied as part of the import as they are assumed to be required
# support files.
#
# *But*, there is exactly one case in the entire css2.1 suite where
# a test depends on a file that lives in a different directory,
# which depends on another file that lives outside of its
# directory. This code covers that case :)
if matches[0]['href'].startswith('..'):
support_files = self.support_files(self.ref_doc)
test_info['refsupport'] = support_files
elif self.is_jstest():
test_info = {'test': self.filename, 'jstest': True}
elif self.options['all'] is True and not('-ref' in self.filename) and not('reference' in self.filename):
test_info = {'test': self.filename}
return test_info
def reference_links_of_type(self, reftest_type):
return self.test_doc.findAll(rel=reftest_type)
def is_jstest(self):
"""Returns whether the file appears to be a jstest, by searching for usage of W3C-style testharness paths."""
return bool(self.test_doc.find(src=re.compile('[\'\"/]?/resources/testharness')))
def support_files(self, doc):
""" Searches the file for all paths specified in url()'s, href or src attributes."""
support_files = []
if doc is None:
return support_files
elements_with_src_attributes = doc.findAll(src=re.compile('.*'))
elements_with_href_attributes = doc.findAll(href=re.compile('.*'))
url_pattern = re.compile('url\(.*\)')
urls = []
for url in doc.findAll(text=url_pattern):
url = re.search(url_pattern, url)
url = re.sub('url\([\'\"]?', '', url.group(0))
url = re.sub('[\'\"]?\)', '', url)
urls.append(url)
src_paths = [src_tag['src'] for src_tag in elements_with_src_attributes]
href_paths = [href_tag['href'] for href_tag in elements_with_href_attributes]
paths = src_paths + href_paths + urls
for path in paths:
if not(path.startswith('http:')) and not(path.startswith('mailto:')):
support_files.append(path)
return support_files
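# Illustrative inputs for the parser above (a sketch; file names hypothetical):
# a test file containing
#
#   <link rel="match" href="green-box-ref.html">
#
# is reported by analyze_test() as {'test': <test path>, 'reference': <path to
# green-box-ref.html>}, while a test that loads a script from
# /resources/testharness is reported as {'test': <test path>, 'jstest': True}.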
| bsd-3-clause |
drnextgis/QGIS | python/plugins/processing/core/parameters.py | 1 | 55397 | # -*- coding: utf-8 -*-
"""
***************************************************************************
Parameters.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from builtins import str
from builtins import range
from builtins import object
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import sys
import os
import math
from inspect import isclass
from copy import deepcopy
import numbers
from qgis.utils import iface
from qgis.PyQt.QtCore import QCoreApplication
from qgis.core import (QgsRasterLayer, QgsVectorLayer, QgsMapLayer, QgsCoordinateReferenceSystem,
QgsExpressionContext, QgsExpressionContextUtils, QgsExpression, QgsExpressionContextScope)
from processing.tools.vector import resolveFieldIndex, features
from processing.tools import dataobjects
from processing.core.outputs import OutputNumber, OutputRaster, OutputVector
from processing.tools.dataobjects import getObject
def parseBool(s):
if s is None or s == str(None).lower():
return None
return str(s).lower() == str(True).lower()
def _splitParameterOptions(line):
tokens = line.split('=', 1)
if tokens[1].lower().strip().startswith('optional'):
isOptional = True
definition = tokens[1].strip()[len('optional') + 1:]
else:
isOptional = False
definition = tokens[1]
return isOptional, tokens[0], definition
def _createDescriptiveName(s):
return s.replace('_', ' ')
def _expressionContext():
context = QgsExpressionContext()
context.appendScope(QgsExpressionContextUtils.globalScope())
context.appendScope(QgsExpressionContextUtils.projectScope())
if iface.mapCanvas():
context.appendScope(QgsExpressionContextUtils.mapSettingsScope(iface.mapCanvas().mapSettings()))
processingScope = QgsExpressionContextScope()
extent = iface.mapCanvas().fullExtent()
processingScope.setVariable('fullextent_minx', extent.xMinimum())
processingScope.setVariable('fullextent_miny', extent.yMinimum())
processingScope.setVariable('fullextent_maxx', extent.xMaximum())
processingScope.setVariable('fullextent_maxy', extent.yMaximum())
context.appendScope(processingScope)
return context
def _resolveLayers(value):
layers = dataobjects.getAllLayers()
if value:
inputlayers = value.split(';')
for i, inputlayer in enumerate(inputlayers):
for layer in layers:
if layer.name() == inputlayer:
inputlayers[i] = layer.source()
break
return ";".join(inputlayers)
class Parameter(object):
"""
Base class for all parameters that a geoalgorithm might
take as input.
"""
default_metadata = {}
def __init__(self, name='', description='', default=None, optional=False,
metadata={}):
self.name = name
self.description = description
self.default = default
self.value = default
self.isAdvanced = False
# A hidden parameter can be used to set a hard-coded value.
# It can be used as any other parameter, but it will not be
# shown to the user
self.hidden = False
self.optional = parseBool(optional)
# TODO: make deep copy and deep update
self.metadata = deepcopy(self.default_metadata)
self.metadata.update(deepcopy(metadata))
def setValue(self, obj):
"""
Sets the value of the parameter.
Returns true if the value passed is correct for the type
of parameter.
"""
if obj is None:
if not self.optional:
return False
self.value = None
return True
self.value = str(obj)
return True
def setDefaultValue(self):
"""
Sets the value of the parameter to the default one
Returns true if the default value is correct for the type
of parameter.
"""
return self.setValue(self.default)
def __str__(self):
return u'{} <{}>'.format(self.name, self.__class__.__name__)
def getValueAsCommandLineParameter(self):
"""
Returns the value of this parameter as it should have been
entered in the console if calling an algorithm using the
Processing.runalg() method.
"""
return str(self.value)
def typeName(self):
return self.__class__.__name__.replace('Parameter', '').lower()
def todict(self):
o = deepcopy(self.__dict__)
del o['metadata']
return o
def tr(self, string, context=''):
if context == '':
context = 'Parameter'
return QCoreApplication.translate(context, string)
def wrapper(self, dialog, row=0, col=0):
wrapper = self.metadata.get('widget_wrapper', None)
# wrapper metadata should be a class path
if isinstance(wrapper, str):
tokens = wrapper.split('.')
mod = __import__('.'.join(tokens[:-1]), fromlist=[tokens[-1]])
wrapper = getattr(mod, tokens[-1])
# or directly a class object
if isclass(wrapper):
wrapper = wrapper(self, dialog, row, col)
# or a wrapper instance
return wrapper
def evaluate(self, alg):
pass
def evaluateForModeler(self, value, model):
return value
class ParameterBoolean(Parameter):
default_metadata = {
'widget_wrapper': 'processing.gui.wrappers.BooleanWidgetWrapper'
}
def __init__(self, name='', description='', default=None, optional=False, metadata={}):
Parameter.__init__(self, name, description, parseBool(default), optional, metadata)
def setValue(self, value):
if value is None:
if not self.optional:
return False
self.value = None
return True
if isinstance(value, str):
self.value = str(value).lower() == str(True).lower()
else:
self.value = bool(value)
return True
def getAsScriptCode(self):
param_type = ''
if self.optional:
param_type += 'optional '
param_type += 'boolean '
return '##' + self.name + '=' + param_type + str(self.default)
@classmethod
def fromScriptCode(self, line):
isOptional, name, definition = _splitParameterOptions(line)
if definition.startswith("boolean"):
descName = _createDescriptiveName(name)
default = definition.strip()[len('boolean') + 1:]
if default:
param = ParameterBoolean(name, descName, default)
else:
param = ParameterBoolean(name, descName)
param.optional = isOptional
return param
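# Illustrative round trip for the script-code helpers above (a sketch; assumes
# the leading '##' has already been stripped by the script parser before
# fromScriptCode() is called):
#
#   ParameterBoolean('MY_FLAG', 'MY FLAG', True).getAsScriptCode()
#   # -> '##MY_FLAG=boolean True'
#   ParameterBoolean.fromScriptCode('MY_FLAG=optional boolean True')
#   # -> optional ParameterBoolean named 'MY_FLAG' with default 'True'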
class ParameterCrs(Parameter):
default_metadata = {
'widget_wrapper': 'processing.gui.wrappers.CrsWidgetWrapper'
}
def __init__(self, name='', description='', default=None, optional=False, metadata={}):
'''The value is a string that uniquely identifies the
coordinate reference system. Typically it is the auth id of the CRS
(if the authority is EPSG) or proj4 string of the CRS (in case
of other authorities or user defined projections).'''
Parameter.__init__(self, name, description, default, optional, metadata)
def setValue(self, value):
if not bool(value):
if not self.optional:
return False
self.value = None
return True
if isinstance(value, QgsCoordinateReferenceSystem):
self.value = value.authid()
return True
if isinstance(value, QgsMapLayer):
self.value = value.crs().authid()
return True
try:
layer = dataobjects.getObjectFromUri(value)
if layer is not None:
self.value = layer.crs().authid()
return True
except:
pass
# TODO: check it is a valid authid
self.value = value
return True
def getValueAsCommandLineParameter(self):
return '"' + str(self.value) + '"'
def getAsScriptCode(self):
param_type = ''
if self.optional:
param_type += 'optional '
param_type += 'crs '
return '##' + self.name + '=' + param_type + str(self.default)
@classmethod
def fromScriptCode(self, line):
isOptional, name, definition = _splitParameterOptions(line)
if definition.startswith("crs"):
descName = _createDescriptiveName(name)
default = definition.strip()[len('crs') + 1:]
if default:
return ParameterCrs(name, descName, default, isOptional)
else:
return ParameterCrs(name, descName, None, isOptional)
class ParameterDataObject(Parameter):
def getValueAsCommandLineParameter(self):
if self.value is None:
return str(None)
else:
s = dataobjects.normalizeLayerSource(str(self.value))
s = '"%s"' % s
return s
def evaluate(self, alg):
self.value = _resolveLayers(self.value)
class ParameterExtent(Parameter):
default_metadata = {
'widget_wrapper': 'processing.gui.wrappers.ExtentWidgetWrapper'
}
USE_MIN_COVERING_EXTENT = 'USE_MIN_COVERING_EXTENT'
def __init__(self, name='', description='', default=None, optional=True):
Parameter.__init__(self, name, description, default, optional)
# The value is a string in the form "xmin, xmax, ymin, ymax"
def setValue(self, value):
if not value:
if not self.optional:
return False
self.value = None
return True
if isinstance(value, QgsMapLayer):
rect = value.extent()
self.value = '{},{},{},{}'.format(
rect.xMinimum(), rect.xMaximum(), rect.yMinimum(), rect.yMaximum())
return True
try:
layer = dataobjects.getObjectFromUri(value)
if layer is not None:
rect = layer.extent()
self.value = '{},{},{},{}'.format(
rect.xMinimum(), rect.xMaximum(), rect.yMinimum(), rect.yMaximum())
return True
except:
pass
tokens = str(value).split(',')
if len(tokens) != 4:
return False
try:
float(tokens[0])
float(tokens[1])
float(tokens[2])
float(tokens[3])
self.value = value
return True
except:
return False
def getValueAsCommandLineParameter(self):
return '"' + str(self.value) + '"'
def getAsScriptCode(self):
param_type = ''
if self.optional:
param_type += 'optional '
param_type += 'extent'
return '##' + self.name + '=' + param_type
@classmethod
def fromScriptCode(self, line):
isOptional, name, definition = _splitParameterOptions(line)
if definition.startswith("extent"):
descName = _createDescriptiveName(name)
default = definition.strip()[len('extent') + 1:] or None
return ParameterExtent(name, descName, default, isOptional)
def evaluate(self, alg):
if self.optional and not bool(self.value):
self.value = self.getMinCoveringExtent(alg)
def getMinCoveringExtent(self, alg):
first = True
found = False
for param in alg.parameters:
if param.value:
if isinstance(param, (ParameterRaster, ParameterVector)):
if isinstance(param.value, (QgsRasterLayer,
QgsVectorLayer)):
layer = param.value
else:
layer = dataobjects.getObject(param.value)
if layer:
found = True
self.addToRegion(layer, first)
first = False
elif isinstance(param, ParameterMultipleInput):
layers = param.value.split(';')
for layername in layers:
layer = dataobjects.getObject(layername)
if layer:
found = True
self.addToRegion(layer, first)
first = False
if found:
return '{},{},{},{}'.format(
self.xmin, self.xmax, self.ymin, self.ymax)
else:
return None
def addToRegion(self, layer, first):
if first:
self.xmin = layer.extent().xMinimum()
self.xmax = layer.extent().xMaximum()
self.ymin = layer.extent().yMinimum()
self.ymax = layer.extent().yMaximum()
else:
self.xmin = min(self.xmin, layer.extent().xMinimum())
self.xmax = max(self.xmax, layer.extent().xMaximum())
self.ymin = min(self.ymin, layer.extent().yMinimum())
self.ymax = max(self.ymax, layer.extent().yMaximum())
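# Illustrative extent values for the class above (a sketch):
#
#   param = ParameterExtent('EXTENT', 'Extent')
#   param.setValue('0,10,0,5')     # "xmin,xmax,ymin,ymax" -> accepted (True)
#   param.setValue('0,10,0')       # wrong number of tokens -> rejected (False)
#
# When optional and left empty, evaluate() fills the value with the minimum
# extent covering every raster/vector input of the algorithm.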
class ParameterPoint(Parameter):
default_metadata = {
'widget_wrapper': 'processing.gui.wrappers.PointWidgetWrapper'
}
def __init__(self, name='', description='', default=None, optional=False):
Parameter.__init__(self, name, description, default, optional)
# The value is a string in the form "x, y"
def setValue(self, text):
if text is None:
if not self.optional:
return False
self.value = None
return True
tokens = str(text).split(',')
if len(tokens) != 2:
return False
try:
float(tokens[0])
float(tokens[1])
self.value = text
return True
except:
return False
def getValueAsCommandLineParameter(self):
return '"' + str(self.value) + '"'
def getAsScriptCode(self):
param_type = ''
if self.optional:
param_type += 'optional '
param_type += 'point'
return '##' + self.name + '=' + param_type
@classmethod
def fromScriptCode(self, line):
isOptional, name, definition = _splitParameterOptions(line)
if definition.startswith("point"):
descName = _createDescriptiveName(name)
default = definition.strip()[len('point') + 1:] or None
return ParameterPoint(name, descName, default, isOptional)
class ParameterFile(Parameter):
default_metadata = {
'widget_wrapper': 'processing.gui.wrappers.FileWidgetWrapper'
}
def __init__(self, name='', description='', isFolder=False, optional=True, ext=None):
Parameter.__init__(self, name, description, None, parseBool(optional))
self.ext = ext
self.isFolder = parseBool(isFolder)
def getValueAsCommandLineParameter(self):
return '"' + str(self.value) + '"'
def setValue(self, obj):
if obj is None or obj.strip() == '':
if not self.optional:
return False
self.value = None if obj is None else obj.strip()
return True
if self.ext is not None and obj != '' and not obj.endswith(self.ext):
return False
self.value = str(obj)
return True
def typeName(self):
if self.isFolder:
return 'directory'
else:
return 'file'
def getAsScriptCode(self):
param_type = ''
if self.optional:
param_type += 'optional '
if self.isFolder:
param_type += 'folder'
else:
param_type += 'file'
return '##' + self.name + '=' + param_type
@classmethod
def fromScriptCode(self, line):
isOptional, name, definition = _splitParameterOptions(line)
if definition.startswith("file") or definition.startswith("folder"):
descName = _createDescriptiveName(name)
return ParameterFile(name, descName, definition.startswith("folder"), isOptional)
class ParameterFixedTable(Parameter):
def __init__(self, name='', description='', numRows=3,
cols=['value'], fixedNumOfRows=False, optional=False):
Parameter.__init__(self, name, description, None, optional)
self.cols = cols
if isinstance(cols, str):
self.cols = self.cols.split(";")
self.numRows = int(numRows)
self.fixedNumOfRows = parseBool(fixedNumOfRows)
def setValue(self, obj):
if obj is None:
if not self.optional:
return False
self.value = None
return True
# TODO: check that it contains a correct number of elements
if isinstance(obj, str):
self.value = obj
else:
self.value = ParameterFixedTable.tableToString(obj)
return True
def getValueAsCommandLineParameter(self):
return '"' + str(self.value) + '"'
@staticmethod
def tableToString(table):
tablestring = ''
for i in range(len(table)):
for j in range(len(table[0])):
tablestring = tablestring + table[i][j] + ','
tablestring = tablestring[:-1]
return tablestring
@classmethod
def fromScriptCode(self, line):
isOptional, name, definition = _splitParameterOptions(line)
if definition.startswith("point"):
descName = _createDescriptiveName(name)
default = definition.strip()[len('point') + 1:] or None
return ParameterPoint(name, descName, default, isOptional)
class ParameterMultipleInput(ParameterDataObject):
"""A parameter representing several data objects.
Its value is a string with substrings separated by semicolons,
each of which represents the data source location of each element.
"""
default_metadata = {
'widget_wrapper': 'processing.gui.wrappers.MultipleInputWidgetWrapper'
}
exported = None
def __init__(self, name='', description='', datatype=-1, optional=False):
ParameterDataObject.__init__(self, name, description, None, optional)
self.datatype = int(float(datatype))
self.exported = None
self.minNumInputs = 0
""" Set minimum required number of inputs for parameter
    By default no minimum number of inputs is required.
    @type _minNumInputs: numeric type or None
    @param _minNumInputs: required minimum number of inputs for the parameter. \
    If None is passed, the default minimum number of inputs (0) is used.
    @return: True if the minimum number of inputs was set.
"""
def setMinNumInputs(self, _minNumInputs):
if _minNumInputs is None:
self.minNumInputs = 0
return True
if _minNumInputs < 1 and not self.optional:
# don't allow to set negative or null number of inputs if parameter isn't optional
return False
self.minNumInputs = int(_minNumInputs)
return True
""" Get minimum required number of inputs for parameter
@return: minimum number of inputs required for this parameter
@see: setMinNumInputs()
"""
def getMinNumInputs(self):
return self.minNumInputs
def setValue(self, obj):
self.exported = None
if obj is None:
if not self.optional:
return False
self.value = None
return True
if isinstance(obj, list):
if len(obj) == 0:
if self.optional:
self.value = None
return True
else:
return False
# prevent setting value if we didn't provide required minimal number of inputs
elif len(obj) < self.minNumInputs:
return False
self.value = ";".join([self.getAsString(lay) for lay in obj])
return True
else:
self.value = str(obj)
return True
def getSafeExportedLayers(self):
"""
Returns not the value entered by the user, but a string with
semicolon-separated filenames which contains the data of the
selected layers, but saved in a standard format (currently
shapefiles for vector layers and GeoTiff for raster) so that
they can be opened by most external applications.
If there is a selection and QGIS is configured to use just the
selection, it exports the layer even if it is already in a
suitable format.
Works only if the layer represented by the parameter value is
currently loaded in QGIS. Otherwise, it will not perform any
export and return the current value string.
If the current value represents a layer in a suitable format,
it does no export at all and returns that value.
Currently, it works just for vector layer. In the case of
raster layers, it returns the parameter value.
The layers are exported just the first time the method is
called. The method can be called several times and it will
always return the same string, performing the export only the
first time.
"""
if self.exported:
return self.exported
self.exported = self.value
layers = self.value.split(';')
if layers is None or len(layers) == 0:
return self.value
if self.datatype == dataobjects.TYPE_RASTER:
for layerfile in layers:
layer = dataobjects.getObjectFromUri(layerfile, False)
if layer:
filename = dataobjects.exportRasterLayer(layer)
self.exported = self.exported.replace(layerfile, filename)
return self.exported
elif self.datatype == dataobjects.TYPE_FILE:
return self.value
else:
for layerfile in layers:
layer = dataobjects.getObjectFromUri(layerfile, False)
if layer:
filename = dataobjects.exportVectorLayer(layer)
self.exported = self.exported.replace(layerfile, filename)
return self.exported
def getAsString(self, value):
if self.datatype == dataobjects.TYPE_RASTER:
if isinstance(value, QgsRasterLayer):
return str(value.dataProvider().dataSourceUri())
else:
s = str(value)
layers = dataobjects.getRasterLayers()
for layer in layers:
if layer.name() == s:
return str(layer.dataProvider().dataSourceUri())
return s
if self.datatype == dataobjects.TYPE_FILE:
return str(value)
else:
if isinstance(value, QgsVectorLayer):
return str(value.source())
else:
s = str(value)
layers = dataobjects.getVectorLayers([self.datatype])
for layer in layers:
if layer.name() == s:
return str(layer.source())
return s
def getFileFilter(self):
if self.datatype == dataobjects.TYPE_RASTER:
exts = dataobjects.getSupportedOutputRasterLayerExtensions()
elif self.datatype == dataobjects.TYPE_FILE:
return self.tr('All files (*.*)', 'ParameterMultipleInput')
else:
exts = dataobjects.getSupportedOutputVectorLayerExtensions()
for i in range(len(exts)):
exts[i] = self.tr('%s files(*.%s)', 'ParameterMultipleInput') % (exts[i].upper(), exts[i].lower())
return ';;'.join(exts)
def dataType(self):
if self.datatype == dataobjects.TYPE_VECTOR_POINT:
return 'points'
elif self.datatype == dataobjects.TYPE_VECTOR_LINE:
return 'lines'
elif self.datatype == dataobjects.TYPE_VECTOR_POLYGON:
return 'polygons'
elif self.datatype == dataobjects.TYPE_RASTER:
return 'rasters'
elif self.datatype == dataobjects.TYPE_FILE:
return 'files'
else:
return 'any vectors'
def getAsScriptCode(self):
param_type = ''
if self.optional:
param_type += 'optional '
        if self.datatype == dataobjects.TYPE_RASTER:
            param_type += 'multiple raster'
        elif self.datatype == dataobjects.TYPE_FILE:
            param_type += 'multiple file'
        else:
            param_type += 'multiple vector'
return '##' + self.name + '=' + param_type
@classmethod
def fromScriptCode(self, line):
isOptional, name, definition = _splitParameterOptions(line)
descName = _createDescriptiveName(name)
if definition.lower().strip() == 'multiple raster':
return ParameterMultipleInput(name, descName,
dataobjects.TYPE_RASTER, isOptional)
        elif definition.lower().strip() == 'multiple vector':
            return ParameterMultipleInput(name, descName,
                                          dataobjects.TYPE_VECTOR_ANY, isOptional)
def evaluate(self, alg):
self.value = _resolveLayers(self.value)
class ParameterNumber(Parameter):
default_metadata = {
'widget_wrapper': 'processing.gui.wrappers.NumberWidgetWrapper'
}
def __init__(self, name='', description='', minValue=None, maxValue=None,
default=None, optional=False):
Parameter.__init__(self, name, description, default, optional)
if default is not None:
try:
self.default = int(str(default))
self.isInteger = True
except ValueError:
self.default = float(default)
self.isInteger = False
else:
self.isInteger = False
if minValue is not None:
self.min = int(float(minValue)) if self.isInteger else float(minValue)
else:
self.min = None
if maxValue is not None:
self.max = int(float(maxValue)) if self.isInteger else float(maxValue)
else:
self.max = None
self.value = self.default
def setValue(self, n):
if n is None:
if not self.optional:
return False
self.value = None
return True
if isinstance(n, str):
try:
v = self._evaluate(n)
self.value = float(v)
if self.isInteger:
self.value = int(math.floor(self.value))
return True
except:
return False
else:
try:
if float(n) - int(float(n)) == 0:
value = int(float(n))
else:
value = float(n)
if self.min is not None:
if value < self.min:
return False
if self.max is not None:
if value > self.max:
return False
self.value = value
return True
            except:
                return False
def getAsScriptCode(self):
param_type = ''
if self.optional:
param_type += 'optional '
param_type += 'number'
code = '##' + self.name + '=' + param_type
        if self.default is not None:
code += str(self.default)
return code
@classmethod
def fromScriptCode(self, line):
isOptional, name, definition = _splitParameterOptions(line)
descName = _createDescriptiveName(name)
if definition.lower().strip().startswith('number'):
default = definition.strip()[len('number'):] or None
if default == 'None':
default = None
return ParameterNumber(name, descName, default=default, optional=isOptional)
def _evaluate(self, value):
exp = QgsExpression(value)
if exp.hasParserError():
raise ValueError(self.tr("Error in parameter expression: ") + exp.parserErrorString())
result = exp.evaluate(_expressionContext())
if exp.hasEvalError():
raise ValueError("Error evaluating parameter expression: " + exp.evalErrorString())
if self.isInteger:
return math.floor(result)
else:
return result
def evaluate(self, alg):
if isinstance(self.value, str) and bool(self.value):
self.value = self._evaluate(self.value)
def _layerVariables(self, element, alg=None):
variables = {}
layer = getObject(element.value)
if layer is not None:
name = element.name if alg is None else "%s_%s" % (alg.name, element.name)
variables['@%s_minx' % name] = layer.extent().xMinimum()
variables['@%s_miny' % name] = layer.extent().yMinimum()
            variables['@%s_maxx' % name] = layer.extent().xMaximum()
variables['@%s_maxy' % name] = layer.extent().yMaximum()
if isinstance(element, (ParameterRaster, OutputRaster)):
stats = layer.dataProvider().bandStatistics(1)
variables['@%s_avg' % name] = stats.mean
variables['@%s_stddev' % name] = stats.stdDev
variables['@%s_min' % name] = stats.minimumValue
variables['@%s_max' % name] = stats.maximumValue
return variables
def evaluateForModeler(self, value, model):
if isinstance(value, numbers.Number):
return value
variables = {}
for param in model.parameters:
if isinstance(param, ParameterNumber):
variables["@" + param.name] = param.value
if isinstance(param, (ParameterRaster, ParameterVector)):
variables.update(self._layerVariables(param))
for alg in list(model.algs.values()):
for out in alg.algorithm.outputs:
if isinstance(out, OutputNumber):
variables["@%s_%s" % (alg.name, out.name)] = out.value
if isinstance(out, (OutputRaster, OutputVector)):
variables.update(self._layerVariables(out, alg))
for k, v in list(variables.items()):
value = value.replace(k, str(v))
return value
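    # Illustrative sketch (hypothetical names): for a model with a number
    # parameter CELLSIZE whose current value is 25, evaluateForModeler() of the
    # string '@CELLSIZE * 2' returns '25 * 2', which can then be evaluated as a
    # QGIS expression by _evaluate().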
def expressionContext(self):
return _expressionContext()
def getValueAsCommandLineParameter(self):
if self.value is None:
return str(None)
if isinstance(self.value, str):
return '"%s"' + self.value
return str(self.value)
class ParameterRange(Parameter):
def __init__(self, name='', description='', default=None, optional=False):
Parameter.__init__(self, name, description, default, optional)
if default is not None:
values = default.split(',')
try:
int(values[0])
int(values[1])
self.isInteger = True
except:
self.isInteger = False
else:
self.isInteger = False
def setValue(self, text):
if text is None:
if not self.optional:
return False
self.value = None
return True
tokens = text.split(',')
if len(tokens) != 2:
return False
try:
float(tokens[0])
float(tokens[1])
self.value = text
return True
except:
return False
def getValueAsCommandLineParameter(self):
return '"' + str(self.value) + '"' if self.value is not None else str(None)
class ParameterRaster(ParameterDataObject):
default_metadata = {
'widget_wrapper': 'processing.gui.wrappers.RasterWidgetWrapper'
}
def __init__(self, name='', description='', optional=False, showSublayersDialog=True):
ParameterDataObject.__init__(self, name, description, None, optional)
self.showSublayersDialog = parseBool(showSublayersDialog)
self.exported = None
def getSafeExportedLayer(self):
"""Returns not the value entered by the user, but a string with
a filename which contains the data of this layer, but saved in
a standard format (currently always a geotiff file) so that it
can be opened by most external applications.
Works only if the layer represented by the parameter value is
currently loaded in QGIS. Otherwise, it will not perform any
export and return the current value string.
If the current value represents a layer in a suitable format,
it does not export at all and returns that value.
The layer is exported just the first time the method is called.
The method can be called several times and it will always
return the same file, performing the export only the first
time.
"""
if self.exported:
return self.exported
layer = dataobjects.getObjectFromUri(self.value, False)
if layer:
self.exported = dataobjects.exportRasterLayer(layer)
else:
self.exported = self.value
return self.exported
def setValue(self, obj):
self.exported = None
if obj is None:
if not self.optional:
return False
self.value = None
return True
if isinstance(obj, QgsRasterLayer):
self.value = str(obj.dataProvider().dataSourceUri())
return True
else:
self.value = str(obj)
return True
def getFileFilter(self):
exts = dataobjects.getSupportedOutputRasterLayerExtensions()
for i in range(len(exts)):
exts[i] = self.tr('%s files(*.%s)', 'ParameterRaster') % (exts[i].upper(), exts[i].lower())
return ';;'.join(exts)
def getAsScriptCode(self):
param_type = ''
if self.optional:
param_type += 'optional '
param_type += 'raster'
return '##' + self.name + '=' + param_type
@classmethod
def fromScriptCode(self, line):
isOptional, name, definition = _splitParameterOptions(line)
descName = _createDescriptiveName(name)
if definition.lower().strip().startswith('raster'):
return ParameterRaster(name, descName, optional=isOptional)
class ParameterSelection(Parameter):
default_metadata = {
'widget_wrapper': 'processing.gui.wrappers.SelectionWidgetWrapper'
}
def __init__(self, name='', description='', options=[], default=None, isSource=False,
multiple=False, optional=False):
Parameter.__init__(self, name, description, default, optional)
self.multiple = multiple
isSource = parseBool(isSource)
self.options = options
if isSource:
self.options = []
layer = QgsVectorLayer(options[0], "layer", "ogr")
if layer.isValid():
try:
index = resolveFieldIndex(layer, options[1])
feats = features(layer)
for feature in feats:
self.options.append(str(feature.attributes()[index]))
except ValueError:
pass
elif isinstance(self.options, str):
self.options = self.options.split(";")
if default is not None:
try:
self.default = int(default)
except:
self.default = 0
self.value = self.default
def setValue(self, value):
if value is None:
if not self.optional:
return False
self.value = 0
return True
if isinstance(value, list):
if not self.multiple:
return False
values = []
for v in value:
try:
n = int(v)
values.append(n)
except:
return False
if not self.optional and len(values) == 0:
return False
self.value = values
return True
else:
try:
n = int(value)
self.value = n
return True
except:
return False
@classmethod
def fromScriptCode(self, line):
isOptional, name, definition = _splitParameterOptions(line)
descName = _createDescriptiveName(name)
if definition.lower().strip().startswith('selectionfromfile'):
options = definition.strip()[len('selectionfromfile '):].split(';')
return ParameterSelection(name, descName, options, isSource=True, optional=isOptional)
elif definition.lower().strip().startswith('selection'):
options = definition.strip()[len('selection '):].split(';')
return ParameterSelection(name, descName, options, optional=isOptional)
elif definition.lower().strip().startswith('multipleselectionfromfile'):
options = definition.strip()[len('multipleselectionfromfile '):].split(';')
return ParameterSelection(name, descName, options, isSource=True,
multiple=True, optional=isOptional)
elif definition.lower().strip().startswith('multipleselection'):
options = definition.strip()[len('multipleselection '):].split(';')
return ParameterSelection(name, descName, options, multiple=True, optional=isOptional)
class ParameterEvaluationException(Exception):
def __init__(self, param, msg):
        Exception.__init__(self, msg)
self.param = param
class ParameterString(Parameter):
default_metadata = {
'widget_wrapper': 'processing.gui.wrappers.StringWidgetWrapper'
}
NEWLINE = '\n'
ESCAPED_NEWLINE = '\\n'
def __init__(self, name='', description='', default=None, multiline=False,
optional=False, evaluateExpressions=False):
Parameter.__init__(self, name, description, default, optional)
self.multiline = parseBool(multiline)
self.evaluateExpressions = parseBool(evaluateExpressions)
def setValue(self, obj):
if not bool(obj):
if not self.optional:
return False
self.value = None
return True
self.value = str(obj).replace(
ParameterString.ESCAPED_NEWLINE,
ParameterString.NEWLINE
)
return True
def getValueAsCommandLineParameter(self):
return ('"' + str(self.value.replace(ParameterString.NEWLINE,
ParameterString.ESCAPED_NEWLINE)) + '"'
if self.value is not None else str(None))
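    # Illustrative round trip (hypothetical parameter): setValue('one\\ntwo')
    # turns the escaped sequence into a real newline, and
    # getValueAsCommandLineParameter() escapes it again, returning '"one\\ntwo"'.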
def getAsScriptCode(self):
param_type = ''
if self.optional:
param_type += 'optional '
param_type += 'string'
        return '##' + self.name + '=' + param_type + (self.default or '')
@classmethod
def fromScriptCode(self, line):
isOptional, name, definition = _splitParameterOptions(line)
descName = _createDescriptiveName(name)
if definition.lower().strip().startswith('string'):
default = definition.strip()[len('string') + 1:]
if default:
return ParameterString(name, descName, default, optional=isOptional)
else:
return ParameterString(name, descName, optional=isOptional)
elif definition.lower().strip().startswith('longstring'):
default = definition.strip()[len('longstring') + 1:]
if default:
return ParameterString(name, descName, default, multiline=True, optional=isOptional)
else:
return ParameterString(name, descName, multiline=True, optional=isOptional)
def evaluate(self, alg):
if isinstance(self.value, str) and bool(self.value) and self.evaluateExpressions:
exp = QgsExpression(self.value)
if exp.hasParserError():
raise ValueError(self.tr("Error in parameter expression: ") + exp.parserErrorString())
result = exp.evaluate(_expressionContext())
if exp.hasEvalError():
raise ValueError("Error evaluating parameter expression: " + exp.evalErrorString())
self.value = result
def expressionContext(self):
return _expressionContext()
class ParameterExpression(Parameter):
default_metadata = {
'widget_wrapper': 'processing.gui.wrappers.ExpressionWidgetWrapper'
}
NEWLINE = '\n'
ESCAPED_NEWLINE = '\\n'
def __init__(self, name='', description='', default=None, optional=False, parent_layer=None):
Parameter.__init__(self, name, description, default, optional)
self.parent_layer = parent_layer
def setValue(self, obj):
if not bool(obj):
if not self.optional:
return False
self.value = None
return True
        self.value = str(obj).replace(
            ParameterExpression.ESCAPED_NEWLINE,
            ParameterExpression.NEWLINE
        )
return True
def getValueAsCommandLineParameter(self):
return ('"' + str(self.value.replace(ParameterExpression.NEWLINE,
ParameterExpression.ESCAPED_NEWLINE)) + '"'
if self.value is not None else str(None))
def getAsScriptCode(self):
param_type = ''
if self.optional:
param_type += 'optional '
param_type += 'expression'
        return '##' + self.name + '=' + param_type + (self.default or '')
@classmethod
def fromScriptCode(self, line):
isOptional, name, definition = _splitParameterOptions(line)
if definition.lower().strip().startswith('expression'):
descName = _createDescriptiveName(name)
default = definition.strip()[len('expression') + 1:]
if default:
return ParameterExpression(name, descName, default, optional=isOptional)
else:
return ParameterExpression(name, descName, optional=isOptional)
class ParameterTable(ParameterDataObject):
default_metadata = {
'widget_wrapper': 'processing.gui.wrappers.TableWidgetWrapper'
}
def __init__(self, name='', description='', optional=False):
ParameterDataObject.__init__(self, name, description, None, optional)
self.exported = None
def setValue(self, obj):
self.exported = None
if obj is None:
if not self.optional:
return False
self.value = None
return True
if isinstance(obj, QgsVectorLayer):
source = str(obj.source())
self.value = source
return True
else:
self.value = str(obj)
layers = dataobjects.getTables()
for layer in layers:
if layer.name() == self.value or layer.source() == self.value:
source = str(layer.source())
self.value = source
return True
val = str(obj)
self.value = val
return os.path.exists(self.value)
def getSafeExportedTable(self):
"""Returns not the value entered by the user, but a string with
a filename which contains the data of this table, but saved in
a standard format (currently always a DBF file) so that it can
be opened by most external applications.
Works only if the table represented by the parameter value is
currently loaded in QGIS. Otherwise, it will not perform any
export and return the current value string.
If the current value represents a table in a suitable format,
it does not export at all and returns that value.
The table is exported just the first time the method is called.
The method can be called several times and it will always
return the same file, performing the export only the first
time.
"""
if self.exported:
return self.exported
table = dataobjects.getObjectFromUri(self.value, False)
if table:
self.exported = dataobjects.exportTable(table)
else:
self.exported = self.value
return self.exported
def getFileFilter(self):
exts = ['csv', 'dbf']
for i in range(len(exts)):
exts[i] = self.tr('%s files(*.%s)', 'ParameterTable') % (exts[i].upper(), exts[i].lower())
return ';;'.join(exts)
def getAsScriptCode(self):
param_type = ''
if self.optional:
param_type += 'optional '
param_type += 'table'
return '##' + self.name + '=' + param_type
@classmethod
def fromScriptCode(self, line):
isOptional, name, definition = _splitParameterOptions(line)
descName = _createDescriptiveName(name)
if definition.lower().strip().startswith('table'):
return ParameterTable(name, descName, isOptional)
class ParameterTableField(Parameter):
"""A parameter representing a table field.
Its value is a string that represents the name of the field.
"""
default_metadata = {
'widget_wrapper': 'processing.gui.wrappers.TableFieldWidgetWrapper'
}
DATA_TYPE_NUMBER = 0
DATA_TYPE_STRING = 1
DATA_TYPE_DATETIME = 2
DATA_TYPE_ANY = -1
def __init__(self, name='', description='', parent=None, datatype=-1,
optional=False, multiple=False):
Parameter.__init__(self, name, description, None, optional)
self.parent = parent
self.multiple = multiple
self.datatype = int(datatype)
def getValueAsCommandLineParameter(self):
return '"' + str(self.value) + '"' if self.value is not None else str(None)
def setValue(self, value):
if not bool(value):
if not self.optional:
return False
self.value = None
return True
if isinstance(value, list):
if not self.multiple and len(value) > 1:
return False
self.value = ";".join(value)
return True
else:
self.value = str(value)
return True
def __str__(self):
return self.name + ' <' + self.__module__.split('.')[-1] + ' from ' \
+ self.parent + '>'
def dataType(self):
if self.datatype == self.DATA_TYPE_NUMBER:
return 'numeric'
elif self.datatype == self.DATA_TYPE_STRING:
return 'string'
elif self.datatype == self.DATA_TYPE_DATETIME:
return 'datetime'
else:
return 'any'
def getAsScriptCode(self):
param_type = ''
if self.optional:
param_type += 'optional '
param_type += 'field'
return '##' + self.name + '=' + param_type + self.parent
@classmethod
def fromScriptCode(self, line):
isOptional, name, definition = _splitParameterOptions(line)
descName = _createDescriptiveName(name)
if definition.lower().strip().startswith('field'):
if definition.lower().strip().startswith('field number'):
parent = definition.strip()[len('field number') + 1:]
datatype = ParameterTableField.DATA_TYPE_NUMBER
elif definition.lower().strip().startswith('field string'):
parent = definition.strip()[len('field string') + 1:]
datatype = ParameterTableField.DATA_TYPE_STRING
elif definition.lower().strip().startswith('field datetime'):
parent = definition.strip()[len('field datetime') + 1:]
datatype = ParameterTableField.DATA_TYPE_DATETIME
else:
parent = definition.strip()[len('field') + 1:]
datatype = ParameterTableField.DATA_TYPE_ANY
return ParameterTableField(name, descName, parent, datatype, isOptional)
class ParameterVector(ParameterDataObject):
default_metadata = {
'widget_wrapper': 'processing.gui.wrappers.VectorWidgetWrapper'
}
def __init__(self, name='', description='', datatype=[-1],
optional=False):
ParameterDataObject.__init__(self, name, description, None, optional)
if isinstance(datatype, int):
datatype = [datatype]
elif isinstance(datatype, str):
datatype = [int(t) for t in datatype.split(',')]
self.datatype = datatype
self.exported = None
self.allowOnlyOpenedLayers = False
def setValue(self, obj):
self.exported = None
if obj is None:
if not self.optional:
return False
self.value = None
return True
if isinstance(obj, QgsVectorLayer):
self.value = str(obj.source())
return True
else:
self.value = str(obj)
return True
def getSafeExportedLayer(self):
"""Returns not the value entered by the user, but a string with
a filename which contains the data of this layer, but saved in
a standard format (currently always a shapefile) so that it can
be opened by most external applications.
If there is a selection and QGIS is configured to use just the
        selection, it exports the layer even if it is already in a
suitable format.
Works only if the layer represented by the parameter value is
currently loaded in QGIS. Otherwise, it will not perform any
export and return the current value string.
If the current value represents a layer in a suitable format,
it does not export at all and returns that value.
The layer is exported just the first time the method is called.
The method can be called several times and it will always
return the same file, performing the export only the first
time.
"""
if self.exported:
return self.exported
layer = dataobjects.getObjectFromUri(self.value, False)
if layer:
self.exported = dataobjects.exportVectorLayer(layer)
else:
self.exported = self.value
return self.exported
def getFileFilter(self):
exts = dataobjects.getSupportedOutputVectorLayerExtensions()
for i in range(len(exts)):
exts[i] = self.tr('%s files(*.%s)', 'ParameterVector') % (exts[i].upper(), exts[i].lower())
return ';;'.join(exts)
def dataType(self):
return dataobjects.vectorDataType(self)
def getAsScriptCode(self):
param_type = ''
if self.optional:
param_type += 'optional '
param_type += 'vector'
return '##' + self.name + '=' + param_type
@classmethod
def fromScriptCode(self, line):
isOptional, name, definition = _splitParameterOptions(line)
descName = _createDescriptiveName(name)
if definition.lower().strip() == 'vector':
return ParameterVector(name, descName,
[dataobjects.TYPE_VECTOR_ANY], isOptional)
elif definition.lower().strip() == 'vector point':
return ParameterVector(name, descName,
[dataobjects.TYPE_VECTOR_POINT], isOptional)
elif definition.lower().strip() == 'vector line':
return ParameterVector(name, descName,
[dataobjects.TYPE_VECTOR_LINE], isOptional)
elif definition.lower().strip() == 'vector polygon':
return ParameterVector(name, descName,
[dataobjects.TYPE_VECTOR_POLYGON], isOptional)
class ParameterGeometryPredicate(Parameter):
predicates = ('intersects',
'contains',
'disjoint',
'equals',
'touches',
'overlaps',
'within',
'crosses')
def __init__(self, name='', description='', left=None, right=None,
optional=False, enabledPredicates=None):
Parameter.__init__(self, name, description, None, optional)
self.left = left
self.right = right
self.value = None
self.enabledPredicates = enabledPredicates
if self.enabledPredicates is None:
self.enabledPredicates = self.predicates
def getValueAsCommandLineParameter(self):
return str(self.value)
def setValue(self, value):
if value is None:
if not self.optional:
return False
self.value = None
return True
elif len(value) == 0 and not self.optional:
return False
if isinstance(value, str):
self.value = value.split(';') # relates to ModelerAlgorithm.resolveValue
else:
self.value = value
return True
paramClasses = [c for c in list(sys.modules[__name__].__dict__.values()) if isclass(c) and issubclass(c, Parameter)]
def getParameterFromString(s):
# Try the parameter definitions used in description files
if '|' in s and (s.startswith("Parameter") or s.startswith("*Parameter")):
isAdvanced = False
if s.startswith("*"):
s = s[1:]
isAdvanced = True
tokens = s.split("|")
params = [t if str(t) != str(None) else None for t in tokens[1:]]
try:
clazz = getattr(sys.modules[__name__], tokens[0])
param = clazz(*params)
param.isAdvanced = isAdvanced
return param
except:
return None
else: # try script syntax
for paramClass in paramClasses:
try:
param = paramClass.fromScriptCode(s)
if param is not None:
return param
except AttributeError:
pass
except:
return None
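# Illustrative usage sketch (not part of the original module; values are
# hypothetical). Both syntaxes accepted by getParameterFromString() are shown:
#   getParameterFromString('ParameterNumber|SIZE|Cell size|0.0|None|100.0')
#   getParameterFromString('##size=number 100.0')
# The first form is the '|'-separated description-file syntax (class name plus
# constructor arguments); the second is the script syntax handled by the
# fromScriptCode() methods above.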
| gpl-2.0 |
andrewk1/Climb-Bot | climb-bot.py | 1 | 3083 | import praw
import requests
import json
import time
import re
# Function iterates over each submission title and checks if the title contains route syntax that indicates the post is about a route
def parse_titles(bot, subreddit):
start_time = time.time()
for submission in subreddit.stream.submissions():
if (submission.created_utc < start_time):
continue
title = submission.title
# regex matches sequence of capitalized words followed by climb grade notation (V or 5.)
route_regex = '([A-Z][a-z]+(?=\s[A-Z])(?:\s[A-Z][a-z]+)+) [( ]?(5.[0-9][0-9]?[A-Za-z]|[Vv][0-9][0-9]?)'
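        # Hypothetical titles this pattern is meant to catch (illustrative only):
        #   'Just sent Midnight Lightning V8!'    -> 'Midnight Lightning V8'
        #   'Beta for Moonlight Buttress 5.12d?'  -> 'Moonlight Buttress 5.12d'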
        route_name = re.search(route_regex, title)
        if route_name is None:
            continue
        print route_name
        comment = make_get_request(route_name.group(0))
if comment != 'NA':
submission.reply(comment)
# Call custom google search engine API to parse the formulated title and gather theCrag's metadata for the route
def make_get_request(route):
key = 'key=***'
cx = 'cx=***'
query= 'q='+route
    google_url = 'https://www.googleapis.com/customsearch/v1?' + key + '&' + cx + '&' + query
response = requests.get(google_url)
parsed_response= json.loads(response.text)
return form_post(parsed_response)
# Extract data from google's JSON response and form a post
def form_post(parsed_response):
# Check if Google search received a hit
    if 'items' not in parsed_response or int(parsed_response['searchInformation']['totalResults']) == 0:
return 'NA'
title = parsed_response['items'][0]['title']
print title
breadcrumb = parsed_response['items'][0]['pagemap']['breadcrumb']
    # Build up region string, most specific region first
    region_string = ''
    for count, crumb in enumerate(breadcrumb):
        region = crumb['title']
        if count > 0:
            region_string = region + ', ' + region_string
        else:
            region_string = region
metatags = parsed_response['items'][0]['pagemap']['metatags']
country = breadcrumb[0]['title']
latitude = metatags[0]['place:location:latitude']
longitude = metatags[0]['place:location:longitude']
    google_pin = 'https://www.google.com/maps/@?api=1&map_action=map&basemap=satellite&zoom=19&center=' + latitude + ',' + longitude
link = metatags[0]['og:url']
if (' in ' in title):
title = title[:title.index(' in ')]
# Truncate values to 3rd decimal place
lat_decimal = latitude.index('.')
latitude = latitude[:lat_decimal+4]
long_decimal = longitude.index('.')
longitude = longitude[:long_decimal+4]
# Format comment response
return 'I found a route! [' + title + '](' + link + ') in ' + region_string + '\n\nGPS Location: [' + latitude + ', ' + longitude + ']('+google_pin+')' + '\n\n ' + '\n\n^^^I ^^^am ^^^a ^^^bot ^^^| ^^^Data ^^^from ^^^[theCrag.com](https://www.thecrag.com/) ^^^| ^^^Feedback ^^^welcome ^^^at ^^^[r/climbBot](https://www.reddit.com/r/climbBot/)'
if __name__ == "__main__":
bot = praw.Reddit(
user_agent='climb-bot posts additional information on climbing routes it finds, created by /u/Akondrich, email: [email protected]',
client_id='***',
client_secret='***',
username='climb-bot',
password='***')
subreddit = bot.subreddit('climbBot')
parse_titles(bot, subreddit)
| mit |
UXE/local-edx | cms/envs/common.py | 1 | 24184 | # -*- coding: utf-8 -*-
"""
This is the common settings file, intended to set sane defaults. If you have a
piece of configuration that's dependent on a set of feature flags being set,
then create a function that returns the calculated value based on the value of
FEATURES[...]. Modules that extend this one can change the feature
configuration in an environment specific config file and re-calculate those
values.
We should make a method that calls all these config methods so that you just
make one call at the end of your site-specific dev file to reset all the
dependent variables (like INSTALLED_APPS) for you.
Longer TODO:
1. Right now our treatment of static content in general and in particular
course-specific static content is haphazard.
2. We should have a more disciplined approach to feature flagging, even if it
just means that we stick them in a dict called FEATURES.
3. We need to handle configuration for multiple courses. This could be as
multiple sites, but we do need a way to map their data assets.
"""
# We intentionally define lots of variables that aren't used, and
# want to import all variables from base settings files
# pylint: disable=wildcard-import, unused-import, unused-wildcard-import
import imp
import os
import sys
import lms.envs.common
# Although this module itself may not use these imported variables, other dependent modules may.
from lms.envs.common import (
USE_TZ, TECH_SUPPORT_EMAIL, PLATFORM_NAME, BUGS_EMAIL, DOC_STORE_CONFIG, ALL_LANGUAGES, WIKI_ENABLED, MODULESTORE,
update_module_store_settings, ASSET_IGNORE_REGEX
)
from path import path
from warnings import simplefilter
from lms.lib.xblock.mixin import LmsBlockMixin
from dealer.git import git
from xmodule.modulestore.edit_info import EditInfoMixin
############################ FEATURE CONFIGURATION #############################
FEATURES = {
'USE_DJANGO_PIPELINE': True,
'GITHUB_PUSH': False,
# for consistency in user-experience, keep the value of the following 3 settings
# in sync with the ones in lms/envs/common.py
'ENABLE_DISCUSSION_SERVICE': True,
'ENABLE_TEXTBOOK': True,
'ENABLE_STUDENT_NOTES': True,
'AUTH_USE_CERTIFICATES': False,
# email address for studio staff (eg to request course creation)
'STUDIO_REQUEST_EMAIL': '',
# Segment.io - must explicitly turn it on for production
'SEGMENT_IO': False,
# Enable URL that shows information about the status of various services
'ENABLE_SERVICE_STATUS': False,
# Don't autoplay videos for course authors
'AUTOPLAY_VIDEOS': False,
# If set to True, new Studio users won't be able to author courses unless
# edX has explicitly added them to the course creator group.
'ENABLE_CREATOR_GROUP': False,
# whether to use password policy enforcement or not
'ENFORCE_PASSWORD_POLICY': False,
# If set to True, Studio won't restrict the set of advanced components
# to just those pre-approved by edX
'ALLOW_ALL_ADVANCED_COMPONENTS': False,
# Turn off account locking if failed login attempts exceeds a limit
'ENABLE_MAX_FAILED_LOGIN_ATTEMPTS': False,
# Allow editing of short description in course settings in cms
'EDITABLE_SHORT_DESCRIPTION': True,
# Hide any Personally Identifiable Information from application logs
'SQUELCH_PII_IN_LOGS': False,
# Toggles the embargo functionality, which enable embargoing for particular courses
'EMBARGO': False,
# Toggles the embargo site functionality, which enable embargoing for the whole site
'SITE_EMBARGOED': False,
# Turn on/off Microsites feature
'USE_MICROSITES': False,
# Allow creating courses with non-ascii characters in the course id
'ALLOW_UNICODE_COURSE_ID': False,
# Prevent concurrent logins per user
'PREVENT_CONCURRENT_LOGINS': False,
# Turn off Advanced Security by default
'ADVANCED_SECURITY': False,
# Modulestore to use for new courses
'DEFAULT_STORE_FOR_NEW_COURSE': None,
}
ENABLE_JASMINE = False
############################# SET PATH INFORMATION #############################
PROJECT_ROOT = path(__file__).abspath().dirname().dirname() # /edx-platform/cms
REPO_ROOT = PROJECT_ROOT.dirname()
COMMON_ROOT = REPO_ROOT / "common"
LMS_ROOT = REPO_ROOT / "lms"
ENV_ROOT = REPO_ROOT.dirname() # virtualenv dir /edx-platform is in
GITHUB_REPO_ROOT = ENV_ROOT / "data"
sys.path.append(REPO_ROOT)
sys.path.append(PROJECT_ROOT / 'djangoapps')
sys.path.append(COMMON_ROOT / 'djangoapps')
sys.path.append(COMMON_ROOT / 'lib')
# For geolocation ip database
GEOIP_PATH = REPO_ROOT / "common/static/data/geoip/GeoIP.dat"
GEOIPV6_PATH = REPO_ROOT / "common/static/data/geoip/GeoIPv6.dat"
############################# WEB CONFIGURATION #############################
# This is where we stick our compiled template files.
import tempfile
MAKO_MODULE_DIR = os.path.join(tempfile.gettempdir(), 'mako_cms')
MAKO_TEMPLATES = {}
MAKO_TEMPLATES['main'] = [
PROJECT_ROOT / 'templates',
COMMON_ROOT / 'templates',
COMMON_ROOT / 'djangoapps' / 'pipeline_mako' / 'templates',
COMMON_ROOT / 'djangoapps' / 'pipeline_js' / 'templates',
]
for namespace, template_dirs in lms.envs.common.MAKO_TEMPLATES.iteritems():
MAKO_TEMPLATES['lms.' + namespace] = template_dirs
TEMPLATE_DIRS = MAKO_TEMPLATES['main']
EDX_ROOT_URL = ''
LOGIN_REDIRECT_URL = EDX_ROOT_URL + '/signin'
LOGIN_URL = EDX_ROOT_URL + '/signin'
TEMPLATE_CONTEXT_PROCESSORS = (
'django.core.context_processors.request',
'django.core.context_processors.static',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.i18n',
'django.contrib.auth.context_processors.auth', # this is required for admin
'django.core.context_processors.csrf',
'dealer.contrib.django.staff.context_processor', # access git revision
'contentstore.context_processors.doc_url',
)
# use the ratelimit backend to prevent brute force attacks
AUTHENTICATION_BACKENDS = (
'ratelimitbackend.backends.RateLimitModelBackend',
)
LMS_BASE = None
# These are standard regexes for pulling out info like course_ids, usage_ids, etc.
# They are used so that URLs with deprecated-format strings still work.
from lms.envs.common import (
COURSE_KEY_PATTERN, COURSE_ID_PATTERN, USAGE_KEY_PATTERN, ASSET_KEY_PATTERN
)
#################### CAPA External Code Evaluation #############################
XQUEUE_INTERFACE = {
'url': 'http://localhost:8888',
'django_auth': {'username': 'local',
'password': 'local'},
'basic_auth': None,
}
################################# Deprecation warnings #####################
# Ignore deprecation warnings (so we don't clutter Jenkins builds/production)
simplefilter('ignore')
################################# Middleware ###################################
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'staticfiles.finders.FileSystemFinder',
'staticfiles.finders.AppDirectoriesFinder',
'pipeline.finders.PipelineFinder',
)
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
'request_cache.middleware.RequestCache',
'django.middleware.cache.UpdateCacheMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'method_override.middleware.MethodOverrideMiddleware',
# Instead of AuthenticationMiddleware, we use a cache-backed version
'cache_toolbox.middleware.CacheBackedAuthenticationMiddleware',
'student.middleware.UserStandingMiddleware',
'contentserver.middleware.StaticContentServer',
'crum.CurrentRequestUserMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'track.middleware.TrackMiddleware',
# Allows us to dark-launch particular languages
'dark_lang.middleware.DarkLangMiddleware',
'embargo.middleware.EmbargoMiddleware',
# Detects user-requested locale from 'accept-language' header in http request
'django.middleware.locale.LocaleMiddleware',
'django.middleware.transaction.TransactionMiddleware',
# needs to run after locale middleware (or anything that modifies the request context)
'edxmako.middleware.MakoMiddleware',
# catches any uncaught RateLimitExceptions and returns a 403 instead of a 500
'ratelimitbackend.middleware.RateLimitMiddleware',
# for expiring inactive sessions
'session_inactivity_timeout.middleware.SessionInactivityTimeout',
# use Django built in clickjacking protection
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
# Clickjacking protection can be enabled by setting this to 'DENY'
X_FRAME_OPTIONS = 'ALLOW'
############# XBlock Configuration ##########
# Import after sys.path fixup
from xmodule.modulestore.inheritance import InheritanceMixin
from xmodule.modulestore import prefer_xmodules
from xmodule.x_module import XModuleMixin
# This should be moved into an XBlock Runtime/Application object
# once the responsibility of XBlock creation is moved out of modulestore - cpennington
XBLOCK_MIXINS = (LmsBlockMixin, InheritanceMixin, XModuleMixin, EditInfoMixin)
# Allow any XBlock in Studio
# You should also enable the ALLOW_ALL_ADVANCED_COMPONENTS feature flag, so that
# xblocks can be added via advanced settings
XBLOCK_SELECT_FUNCTION = prefer_xmodules
############################ Modulestore Configuration ################################
MODULESTORE_BRANCH = 'draft-preferred'
############################ DJANGO_BUILTINS ################################
# Change DEBUG/TEMPLATE_DEBUG in your environment settings files, not here
DEBUG = False
TEMPLATE_DEBUG = False
SESSION_COOKIE_SECURE = False
# Site info
SITE_ID = 1
SITE_NAME = "localhost:8001"
HTTPS = 'on'
ROOT_URLCONF = 'cms.urls'
# Email
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
EMAIL_HOST = 'localhost'
EMAIL_PORT = 25
EMAIL_USE_TLS = False
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
DEFAULT_FROM_EMAIL = '[email protected]'
DEFAULT_FEEDBACK_EMAIL = '[email protected]'
SERVER_EMAIL = '[email protected]'
ADMINS = ()
MANAGERS = ADMINS
# Static content
STATIC_URL = '/static/' + git.revision + "/"
STATIC_ROOT = ENV_ROOT / "staticfiles" / git.revision
STATICFILES_DIRS = [
COMMON_ROOT / "static",
PROJECT_ROOT / "static",
LMS_ROOT / "static",
# This is how you would use the textbook images locally
# ("book", ENV_ROOT / "book_images"),
]
# Locale/Internationalization
TIME_ZONE = 'America/New_York' # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
LANGUAGE_CODE = 'en' # http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGES_BIDI = lms.envs.common.LANGUAGES_BIDI
LANGUAGES = lms.envs.common.LANGUAGES
LANGUAGE_DICT = dict(LANGUAGES)
USE_I18N = True
USE_L10N = True
# Localization strings (e.g. django.po) are under this directory
LOCALE_PATHS = (REPO_ROOT + '/conf/locale',) # edx-platform/conf/locale/
# Messages
MESSAGE_STORAGE = 'django.contrib.messages.storage.session.SessionStorage'
##### EMBARGO #####
EMBARGO_SITE_REDIRECT_URL = None
############################### Pipeline #######################################
STATICFILES_STORAGE = 'cms.lib.django_require.staticstorage.OptimizedCachedRequireJsStorage'
from rooted_paths import rooted_glob
PIPELINE_CSS = {
'style-vendor': {
'source_filenames': [
'css/vendor/normalize.css',
'css/vendor/font-awesome.css',
'css/vendor/html5-input-polyfills/number-polyfill.css',
'js/vendor/CodeMirror/codemirror.css',
'css/vendor/ui-lightness/jquery-ui-1.8.22.custom.css',
'css/vendor/jquery.qtip.min.css',
'js/vendor/markitup/skins/simple/style.css',
'js/vendor/markitup/sets/wiki/style.css',
],
'output_filename': 'css/cms-style-vendor.css',
},
'style-vendor-tinymce-content': {
'source_filenames': [
'css/tinymce-studio-content-fonts.css',
'js/vendor/tinymce/js/tinymce/skins/studio-tmce4/content.min.css',
'css/tinymce-studio-content.css'
],
'output_filename': 'css/cms-style-vendor-tinymce-content.css',
},
'style-vendor-tinymce-skin': {
'source_filenames': [
'js/vendor/tinymce/js/tinymce/skins/studio-tmce4/skin.min.css'
],
'output_filename': 'css/cms-style-vendor-tinymce-skin.css',
},
'style-app': {
'source_filenames': [
'sass/style-app.css',
],
'output_filename': 'css/cms-style-app.css',
},
'style-app-extend1': {
'source_filenames': [
'sass/style-app-extend1.css',
],
'output_filename': 'css/cms-style-app-extend1.css',
},
'style-app-rtl': {
'source_filenames': [
'sass/style-app-rtl.css',
],
'output_filename': 'css/cms-style-app-rtl.css',
},
'style-app-extend1-rtl': {
'source_filenames': [
'sass/style-app-extend1-rtl.css',
],
'output_filename': 'css/cms-style-app-extend1-rtl.css',
},
'style-xmodule': {
'source_filenames': [
'sass/style-xmodule.css',
],
'output_filename': 'css/cms-style-xmodule.css',
},
'style-xmodule-rtl': {
'source_filenames': [
'sass/style-xmodule-rtl.css',
],
'output_filename': 'css/cms-style-xmodule-rtl.css',
},
'style-xmodule-annotations': {
'source_filenames': [
'css/vendor/ova/annotator.css',
'css/vendor/ova/edx-annotator.css',
'css/vendor/ova/video-js.min.css',
'css/vendor/ova/rangeslider.css',
'css/vendor/ova/share-annotator.css',
'css/vendor/ova/richText-annotator.css',
'css/vendor/ova/tags-annotator.css',
'css/vendor/ova/flagging-annotator.css',
'css/vendor/ova/diacritic-annotator.css',
'css/vendor/ova/grouping-annotator.css',
'css/vendor/ova/ova.css',
'js/vendor/ova/catch/css/main.css'
],
'output_filename': 'css/cms-style-xmodule-annotations.css',
},
}
# test_order: Determines the position of this chunk of javascript on
# the jasmine test page
PIPELINE_JS = {
'module-js': {
'source_filenames': (
rooted_glob(COMMON_ROOT / 'static/', 'xmodule/descriptors/js/*.js') +
rooted_glob(COMMON_ROOT / 'static/', 'xmodule/modules/js/*.js') +
rooted_glob(COMMON_ROOT / 'static/', 'coffee/src/discussion/*.js')
),
'output_filename': 'js/cms-modules.js',
'test_order': 1
},
}
PIPELINE_COMPILERS = (
'pipeline.compilers.coffee.CoffeeScriptCompiler',
)
PIPELINE_CSS_COMPRESSOR = None
PIPELINE_JS_COMPRESSOR = None
STATICFILES_IGNORE_PATTERNS = (
"*.py",
"*.pyc"
# it would be nice if we could do, for example, "**/*.scss",
# but these strings get passed down to the `fnmatch` module,
# which doesn't support that. :(
# http://docs.python.org/2/library/fnmatch.html
"sass/*.scss",
"sass/*/*.scss",
"sass/*/*/*.scss",
"sass/*/*/*/*.scss",
"coffee/*.coffee",
"coffee/*/*.coffee",
"coffee/*/*/*.coffee",
"coffee/*/*/*/*.coffee",
# Symlinks used by js-test-tool
"xmodule_js",
"common_static",
)
PIPELINE_YUI_BINARY = 'yui-compressor'
################################# DJANGO-REQUIRE ###############################
# The baseUrl to pass to the r.js optimizer, relative to STATIC_ROOT.
REQUIRE_BASE_URL = "./"
# The name of a build profile to use for your project, relative to REQUIRE_BASE_URL.
# A sensible value would be 'app.build.js'. Leave blank to use the built-in default build profile.
# Set to False to disable running the default profile (e.g. if only using it to build Standalone
# Modules)
REQUIRE_BUILD_PROFILE = "build.js"
# The name of the require.js script used by your project, relative to REQUIRE_BASE_URL.
REQUIRE_JS = "js/vendor/require.js"
# A dictionary of standalone modules to build with almond.js.
REQUIRE_STANDALONE_MODULES = {}
# Whether to run django-require in debug mode.
REQUIRE_DEBUG = False
# A tuple of files to exclude from the compilation result of r.js.
REQUIRE_EXCLUDE = ("build.txt",)
# The execution environment in which to run r.js: auto, node or rhino.
# auto will autodetect the environment and make use of node if available and rhino if not.
# It can also be a path to a custom class that subclasses require.environments.Environment and defines some "args" function that returns a list with the command arguments to execute.
REQUIRE_ENVIRONMENT = "node"
################################# CELERY ######################################
# Message configuration
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_MESSAGE_COMPRESSION = 'gzip'
# Results configuration
CELERY_IGNORE_RESULT = False
CELERY_STORE_ERRORS_EVEN_IF_IGNORED = True
# Events configuration
CELERY_TRACK_STARTED = True
CELERY_SEND_EVENTS = True
CELERY_SEND_TASK_SENT_EVENT = True
# Exchange configuration
CELERY_DEFAULT_EXCHANGE = 'edx.core'
CELERY_DEFAULT_EXCHANGE_TYPE = 'direct'
# Queues configuration
HIGH_PRIORITY_QUEUE = 'edx.core.high'
DEFAULT_PRIORITY_QUEUE = 'edx.core.default'
LOW_PRIORITY_QUEUE = 'edx.core.low'
CELERY_QUEUE_HA_POLICY = 'all'
CELERY_CREATE_MISSING_QUEUES = True
CELERY_DEFAULT_QUEUE = DEFAULT_PRIORITY_QUEUE
CELERY_DEFAULT_ROUTING_KEY = DEFAULT_PRIORITY_QUEUE
CELERY_QUEUES = {
HIGH_PRIORITY_QUEUE: {},
LOW_PRIORITY_QUEUE: {},
DEFAULT_PRIORITY_QUEUE: {}
}
############################## Video ##########################################
YOUTUBE = {
# YouTube JavaScript API
'API': 'www.youtube.com/iframe_api',
# URL to test YouTube availability
'TEST_URL': 'gdata.youtube.com/feeds/api/videos/',
# Current youtube api for requesting transcripts.
# For example: http://video.google.com/timedtext?lang=en&v=j_jEn79vS3g.
'TEXT_API': {
'url': 'video.google.com/timedtext',
'params': {
'lang': 'en',
'v': 'set_youtube_id_of_11_symbols_here',
},
},
}
############################ APPS #####################################
INSTALLED_APPS = (
# Standard apps
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'djcelery',
'south',
'method_override',
# Database-backed configuration
'config_models',
# Monitor the status of services
'service_status',
# Testing
'django_nose',
# For CMS
'contentstore',
'course_creators',
'student', # misleading name due to sharing with lms
'openedx.core.djangoapps.course_groups', # not used in cms (yet), but tests run
# Tracking
'track',
'eventtracking.django',
# Monitoring
'datadog',
# For asset pipelining
'edxmako',
'pipeline',
'staticfiles',
'static_replace',
'require',
# comment common
'django_comment_common',
# for course creator table
'django.contrib.admin',
# for managing course modes
'course_modes',
# Dark-launching languages
'dark_lang',
# Student identity reverification
'reverification',
# User preferences
'openedx.core.djangoapps.user_api',
'django_openid_auth',
'embargo',
# Monitoring signals
'monitoring',
# Course action state
'course_action_state',
# Additional problem types
'edx_jsme', # Molecular Structure
)
################# EDX MARKETING SITE ##################################
EDXMKTG_COOKIE_NAME = 'edxloggedin'
MKTG_URLS = {}
MKTG_URL_LINK_MAP = {
}
COURSES_WITH_UNSAFE_CODE = []
############################## EVENT TRACKING #################################
TRACK_MAX_EVENT = 50000
TRACKING_BACKENDS = {
'logger': {
'ENGINE': 'track.backends.logger.LoggerBackend',
'OPTIONS': {
'name': 'tracking'
}
}
}
# We're already logging events, and we don't want to capture user
# names/passwords. Heartbeat events are likely not interesting.
TRACKING_IGNORE_URL_PATTERNS = [r'^/event', r'^/login', r'^/heartbeat']
EVENT_TRACKING_ENABLED = True
EVENT_TRACKING_BACKENDS = {
'logger': {
'ENGINE': 'eventtracking.backends.logger.LoggerBackend',
'OPTIONS': {
'name': 'tracking',
'max_event_size': TRACK_MAX_EVENT,
}
}
}
EVENT_TRACKING_PROCESSORS = [
{
'ENGINE': 'track.shim.LegacyFieldMappingProcessor'
}
]
#### PASSWORD POLICY SETTINGS #####
PASSWORD_MIN_LENGTH = None
PASSWORD_MAX_LENGTH = None
PASSWORD_COMPLEXITY = {}
PASSWORD_DICTIONARY_EDIT_DISTANCE_THRESHOLD = None
PASSWORD_DICTIONARY = []
##### ACCOUNT LOCKOUT DEFAULT PARAMETERS #####
MAX_FAILED_LOGIN_ATTEMPTS_ALLOWED = 5
MAX_FAILED_LOGIN_ATTEMPTS_LOCKOUT_PERIOD_SECS = 15 * 60
### Apps only installed in some instances
OPTIONAL_APPS = (
'mentoring',
# edx-ora2
'submissions',
'openassessment',
'openassessment.assessment',
'openassessment.fileupload',
'openassessment.workflow',
'openassessment.xblock',
# edxval
'edxval'
)
for app_name in OPTIONAL_APPS:
# First attempt to only find the module rather than actually importing it,
# to avoid circular references - only try to import if it can't be found
# by find_module, which doesn't work with import hooks
try:
imp.find_module(app_name)
except ImportError:
try:
__import__(app_name)
except ImportError:
continue
INSTALLED_APPS += (app_name,)
### ADVANCED_SECURITY_CONFIG
# Empty by default
ADVANCED_SECURITY_CONFIG = {}
### External auth usage -- prefixes for ENROLLMENT_DOMAIN
SHIBBOLETH_DOMAIN_PREFIX = 'shib:'
OPENID_DOMAIN_PREFIX = 'openid:'
### Size of chunks into which asset uploads will be divided
UPLOAD_CHUNK_SIZE_IN_MB = 10
### Max size of asset uploads to GridFS
MAX_ASSET_UPLOAD_FILE_SIZE_IN_MB = 10
# FAQ url to direct users to if they upload
# a file that exceeds the above size
MAX_ASSET_UPLOAD_FILE_SIZE_URL = ""
################ ADVANCED_COMPONENT_TYPES ###############
ADVANCED_COMPONENT_TYPES = [
'annotatable',
'textannotation', # module for annotating text (with annotation table)
'videoannotation', # module for annotating video (with annotation table)
'imageannotation', # module for annotating image (with annotation table)
'word_cloud',
'graphical_slider_tool',
'lti',
# XBlocks from pmitros repos are prototypes. They should not be used
# except for edX Learning Sciences experiments on edge.edx.org without
# further work to make them robust, maintainable, finalize data formats,
# etc.
'concept', # Concept mapper. See https://github.com/pmitros/ConceptXBlock
'done', # Lets students mark things as done. See https://github.com/pmitros/DoneXBlock
'audio', # Embed an audio file. See https://github.com/pmitros/AudioXBlock
'recommender', # Crowdsourced recommender. Prototype by dli&pmitros. Intended for roll-out in one place in one course.
'profile', # Prototype user profile XBlock. Used to test XBlock parameter passing. See https://github.com/pmitros/ProfileXBlock
'split_test',
'combinedopenended',
'peergrading',
'notes',
]
# Adding components to this list will disable the creation of new problems for those
# components in Studio. Existing problems will work fine and can still be edited in Studio.
DEPRECATED_ADVANCED_COMPONENT_TYPES = []
# Specify xblocks that should be treated as advanced problems. Each entry is a tuple
# specifying the xblock name and an optional YAML template to be used.
ADVANCED_PROBLEM_TYPES = [
{
'component': 'openassessment',
'boilerplate_name': None,
}
]
| agpl-3.0 |
Phoenix-Silver/Zte-Blade-New-35-kernel | tools/perf/scripts/python/syscall-counts.py | 944 | 1429 | # system call counts
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
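# Example invocation (mirrors the usage string below; 'firefox' is just a
# hypothetical command name to filter on):
#   perf trace -s syscall-counts.py firefox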
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
usage = "perf trace -s syscall-counts.py [comm]\n";
for_comm = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
pass
def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals():
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"-----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
print "%-40d %10d\n" % (id, val),
| gpl-2.0 |
yuxng/Deep_ISM | ISM/lib/setup.py | 1 | 6351 | # --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
import os
from os.path import join as pjoin
import numpy as np
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
def find_in_path(name, path):
"Find a file in a search path"
    # adapted from http://code.activestate.com/recipes/52224-find-a-file-given-a-search-path/
for dir in path.split(os.pathsep):
binpath = pjoin(dir, name)
if os.path.exists(binpath):
return os.path.abspath(binpath)
return None
def locate_cuda():
"""Locate the CUDA environment on the system
Returns a dict with keys 'home', 'nvcc', 'include', and 'lib64'
and values giving the absolute path to each directory.
Starts by looking for the CUDAHOME env variable. If not found, everything
is based on finding 'nvcc' in the PATH.
"""
# first check if the CUDAHOME env variable is in use
if 'CUDAHOME' in os.environ:
home = os.environ['CUDAHOME']
nvcc = pjoin(home, 'bin', 'nvcc')
else:
# otherwise, search the PATH for NVCC
default_path = pjoin(os.sep, 'usr', 'local', 'cuda', 'bin')
nvcc = find_in_path('nvcc', os.environ['PATH'] + os.pathsep + default_path)
if nvcc is None:
raise EnvironmentError('The nvcc binary could not be '
'located in your $PATH. Either add it to your path, or set $CUDAHOME')
home = os.path.dirname(os.path.dirname(nvcc))
cudaconfig = {'home':home, 'nvcc':nvcc,
'include': pjoin(home, 'include'),
'lib64': pjoin(home, 'lib64')}
for k, v in cudaconfig.iteritems():
if not os.path.exists(v):
raise EnvironmentError('The CUDA %s path could not be located in %s' % (k, v))
return cudaconfig
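# Hypothetical result of locate_cuda() on a default Linux install:
#   {'home': '/usr/local/cuda', 'nvcc': '/usr/local/cuda/bin/nvcc',
#    'include': '/usr/local/cuda/include', 'lib64': '/usr/local/cuda/lib64'}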
CUDA = locate_cuda()
# Obtain the numpy include directory. This logic works across numpy versions.
try:
numpy_include = np.get_include()
except AttributeError:
numpy_include = np.get_numpy_include()
def customize_compiler_for_nvcc(self):
"""inject deep into distutils to customize how the dispatch
to gcc/nvcc works.
If you subclass UnixCCompiler, it's not trivial to get your subclass
injected in, and still have the right customizations (i.e.
distutils.sysconfig.customize_compiler) run on it. So instead of going
    the OO route, I have this. Note, it's kind of like a weird functional
    subclassing going on."""
# tell the compiler it can processes .cu
self.src_extensions.append('.cu')
    # save references to the default compiler_so and _compile methods
default_compiler_so = self.compiler_so
super = self._compile
# now redefine the _compile method. This gets executed for each
# object but distutils doesn't have the ability to change compilers
# based on source extension: we add it.
def _compile(obj, src, ext, cc_args, extra_postargs, pp_opts):
if os.path.splitext(src)[1] == '.cu':
# use the cuda for .cu files
self.set_executable('compiler_so', CUDA['nvcc'])
# use only a subset of the extra_postargs, which are 1-1 translated
# from the extra_compile_args in the Extension class
postargs = extra_postargs['nvcc']
else:
postargs = extra_postargs['gcc']
super(obj, src, ext, cc_args, postargs, pp_opts)
# reset the default compiler_so, which we might have changed for cuda
self.compiler_so = default_compiler_so
# inject our redefined _compile method into the class
self._compile = _compile
# run the customize_compiler
class custom_build_ext(build_ext):
def build_extensions(self):
customize_compiler_for_nvcc(self.compiler)
build_ext.build_extensions(self)
ext_modules = [
Extension(
"utils.cython_bbox",
["utils/bbox.pyx"],
extra_compile_args={'gcc': ["-Wno-cpp", "-Wno-unused-function"]},
),
Extension(
"utils.cython_nms",
["utils/nms.pyx"],
extra_compile_args={'gcc': ["-Wno-cpp", "-Wno-unused-function"]},
),
Extension(
"nms.cpu_nms",
["nms/cpu_nms.pyx"],
extra_compile_args={'gcc': ["-Wno-cpp", "-Wno-unused-function"]},
include_dirs = [numpy_include]
),
Extension('nms.gpu_nms',
['nms/nms_kernel.cu', 'nms/gpu_nms.pyx'],
library_dirs=[CUDA['lib64']],
libraries=['cudart'],
language='c++',
runtime_library_dirs=[CUDA['lib64']],
# this syntax is specific to this build system
# we're only going to use certain compiler args with nvcc and not with gcc
# the implementation of this trick is in customize_compiler() below
extra_compile_args={'gcc': ["-Wno-unused-function"],
'nvcc': ['-arch=sm_35',
'--ptxas-options=-v',
'-c',
'--compiler-options',
"'-fPIC'"]},
include_dirs = [numpy_include, CUDA['include']]
),
Extension('normals.gpu_normals',
['normals/compute_normals.cu', 'normals/gpu_normals.pyx'],
library_dirs=[CUDA['lib64']],
libraries=['cudart'],
language='c++',
runtime_library_dirs=[CUDA['lib64']],
# this syntax is specific to this build system
# we're only going to use certain compiler args with nvcc and not with gcc
# the implementation of this trick is in customize_compiler() below
extra_compile_args={'gcc': ["-Wno-unused-function"],
'nvcc': ['-arch=sm_35',
'--ptxas-options=-v',
'-c',
'--compiler-options',
"'-fPIC'"]},
include_dirs = [numpy_include, CUDA['include'], '/usr/local/include/eigen3']
)
]
setup(
name='fast_rcnn',
ext_modules=ext_modules,
# inject our custom trigger
cmdclass={'build_ext': custom_build_ext},
)
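# Typical build invocation for these extensions (standard distutils usage;
# nvcc must be on PATH or CUDAHOME must point at the CUDA toolkit root):
#   python setup.py build_ext --inplace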
| mit |
bev-a-tron/pledgeservice | testlib/waitress/receiver.py | 39 | 4849 | ##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Data Chunk Receiver
"""
from waitress.utilities import find_double_newline
from waitress.utilities import BadRequest
class FixedStreamReceiver(object):
# See IStreamConsumer
completed = False
error = None
def __init__(self, cl, buf):
self.remain = cl
self.buf = buf
def __len__(self):
return self.buf.__len__()
def received(self, data):
'See IStreamConsumer'
rm = self.remain
if rm < 1:
self.completed = True # Avoid any chance of spinning
return 0
datalen = len(data)
if rm <= datalen:
self.buf.append(data[:rm])
self.remain = 0
self.completed = True
return rm
else:
self.buf.append(data)
self.remain -= datalen
return datalen
def getfile(self):
return self.buf.getfile()
def getbuf(self):
return self.buf
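# Minimal usage sketch for FixedStreamReceiver (illustration only; the buffer
# object and the Content-Length value come from the surrounding server code):
#
#     receiver = FixedStreamReceiver(cl=5, buf=some_buffer)
#     consumed = receiver.received(b'helloEXTRA')  # consumed == 5
#     receiver.completed                           # True; the extra bytes
#                                                  # belong to the next request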
class ChunkedReceiver(object):
chunk_remainder = 0
control_line = b''
all_chunks_received = False
trailer = b''
completed = False
error = None
# max_control_line = 1024
# max_trailer = 65536
def __init__(self, buf):
self.buf = buf
def __len__(self):
return self.buf.__len__()
def received(self, s):
# Returns the number of bytes consumed.
if self.completed:
return 0
orig_size = len(s)
while s:
rm = self.chunk_remainder
if rm > 0:
# Receive the remainder of a chunk.
to_write = s[:rm]
self.buf.append(to_write)
written = len(to_write)
s = s[written:]
self.chunk_remainder -= written
elif not self.all_chunks_received:
# Receive a control line.
s = self.control_line + s
pos = s.find(b'\n')
if pos < 0:
# Control line not finished.
self.control_line = s
                    s = b''
else:
# Control line finished.
line = s[:pos]
s = s[pos + 1:]
self.control_line = b''
line = line.strip()
if line:
# Begin a new chunk.
semi = line.find(b';')
if semi >= 0:
# discard extension info.
line = line[:semi]
try:
sz = int(line.strip(), 16) # hexadecimal
except ValueError: # garbage in input
self.error = BadRequest(
'garbage in chunked encoding input')
sz = 0
if sz > 0:
# Start a new chunk.
self.chunk_remainder = sz
else:
# Finished chunks.
self.all_chunks_received = True
# else expect a control line.
else:
# Receive the trailer.
trailer = self.trailer + s
if trailer.startswith(b'\r\n'):
# No trailer.
self.completed = True
return orig_size - (len(trailer) - 2)
elif trailer.startswith(b'\n'):
# No trailer.
self.completed = True
return orig_size - (len(trailer) - 1)
pos = find_double_newline(trailer)
if pos < 0:
# Trailer not finished.
self.trailer = trailer
s = b''
else:
# Finished the trailer.
self.completed = True
self.trailer = trailer[:pos]
return orig_size - (len(trailer) - pos)
return orig_size
def getfile(self):
return self.buf.getfile()
def getbuf(self):
return self.buf
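# Illustrative sketch of the chunked parsing implemented above (not part of
# waitress): each chunk is "<hex size>\r\n<data>\r\n" and a zero-size chunk
# followed by a blank line (or trailer headers) ends the body.
#
#     r = ChunkedReceiver(some_buffer)
#     r.received(b'5\r\nhello\r\n')
#     r.received(b'0\r\n\r\n')
#     r.all_chunks_received, r.completed   # both True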
| agpl-3.0 |
havt/odoo | addons/stock_account/stock_account.py | 89 | 19895 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp import SUPERUSER_ID, api
import logging
_logger = logging.getLogger(__name__)
class stock_inventory(osv.osv):
_inherit = "stock.inventory"
_columns = {
'period_id': fields.many2one('account.period', 'Force Valuation Period', help="Choose the accounting period where you want to value the stock moves created by the inventory instead of the default one (chosen by the inventory end date)"),
}
def post_inventory(self, cr, uid, inv, context=None):
if context is None:
context = {}
ctx = context.copy()
if inv.period_id:
ctx['force_period'] = inv.period_id.id
return super(stock_inventory, self).post_inventory(cr, uid, inv, context=ctx)
#----------------------------------------------------------
# Stock Location
#----------------------------------------------------------
class stock_location(osv.osv):
_inherit = "stock.location"
_columns = {
'valuation_in_account_id': fields.many2one('account.account', 'Stock Valuation Account (Incoming)', domain=[('type', '=', 'other')],
help="Used for real-time inventory valuation. When set on a virtual location (non internal type), "
"this account will be used to hold the value of products being moved from an internal location "
"into this location, instead of the generic Stock Output Account set on the product. "
"This has no effect for internal locations."),
'valuation_out_account_id': fields.many2one('account.account', 'Stock Valuation Account (Outgoing)', domain=[('type', '=', 'other')],
help="Used for real-time inventory valuation. When set on a virtual location (non internal type), "
"this account will be used to hold the value of products being moved out of this location "
"and into an internal location, instead of the generic Stock Output Account set on the product. "
"This has no effect for internal locations."),
}
#----------------------------------------------------------
# Quants
#----------------------------------------------------------
class stock_quant(osv.osv):
_inherit = "stock.quant"
def _get_inventory_value(self, cr, uid, quant, context=None):
        if quant.product_id.cost_method == 'real':
return quant.cost * quant.qty
return super(stock_quant, self)._get_inventory_value(cr, uid, quant, context=context)
@api.cr_uid_ids_context
def _price_update(self, cr, uid, quant_ids, newprice, context=None):
        ''' This function is called at the end of negative quant reconciliation. It posts the accounting entry adjustments and updates the product cost price if needed.
        '''
if context is None:
context = {}
account_period = self.pool['account.period']
super(stock_quant, self)._price_update(cr, uid, quant_ids, newprice, context=context)
for quant in self.browse(cr, uid, quant_ids, context=context):
move = self._get_latest_move(cr, uid, quant, context=context)
valuation_update = newprice - quant.cost
# this is where we post accounting entries for adjustment, if needed
if not quant.company_id.currency_id.is_zero(valuation_update):
# adjustment journal entry needed, cost has been updated
period_id = (context.get('force_period') or
account_period.find(cr, uid, move.date, context=context)[0])
period = account_period.browse(cr, uid, period_id, context=context)
# If neg quant period already closed (likely with manual valuation), skip update
if period.state != 'done':
ctx = dict(context, force_valuation_amount=valuation_update)
self._account_entry_move(cr, uid, [quant], move, context=ctx)
#update the standard price of the product, only if we would have done it if we'd have had enough stock at first, which means
#1) the product cost's method is 'real'
#2) we just fixed a negative quant caused by an outgoing shipment
if quant.product_id.cost_method == 'real' and quant.location_id.usage != 'internal':
self.pool.get('stock.move')._store_average_cost_price(cr, uid, move, context=context)
def _account_entry_move(self, cr, uid, quants, move, context=None):
"""
Accounting Valuation Entries
        quants: browse record list of Quants to create accounting valuation entries for. Non-empty, and all quants are supposed to have the same location id (they have already moved in)
move: Move to use. browse record
"""
if context is None:
context = {}
location_obj = self.pool.get('stock.location')
location_from = move.location_id
location_to = quants[0].location_id
company_from = location_obj._location_owner(cr, uid, location_from, context=context)
company_to = location_obj._location_owner(cr, uid, location_to, context=context)
if move.product_id.valuation != 'real_time':
return False
for q in quants:
if q.owner_id:
#if the quant isn't owned by the company, we don't make any valuation entry
return False
if q.qty <= 0:
#we don't make any stock valuation for negative quants because the valuation is already made for the counterpart.
#At that time the valuation will be made at the product cost price and afterward there will be new accounting entries
#to make the adjustments when we know the real cost price.
return False
#in case of routes making the link between several warehouse of the same company, the transit location belongs to this company, so we don't need to create accounting entries
# Create Journal Entry for products arriving in the company
if company_to and (move.location_id.usage not in ('internal', 'transit') and move.location_dest_id.usage == 'internal' or company_from != company_to):
ctx = context.copy()
ctx['force_company'] = company_to.id
journal_id, acc_src, acc_dest, acc_valuation = self._get_accounting_data_for_valuation(cr, uid, move, context=ctx)
if location_from and location_from.usage == 'customer':
#goods returned from customer
self._create_account_move_line(cr, uid, quants, move, acc_dest, acc_valuation, journal_id, context=ctx)
else:
self._create_account_move_line(cr, uid, quants, move, acc_src, acc_valuation, journal_id, context=ctx)
# Create Journal Entry for products leaving the company
if company_from and (move.location_id.usage == 'internal' and move.location_dest_id.usage not in ('internal', 'transit') or company_from != company_to):
ctx = context.copy()
ctx['force_company'] = company_from.id
journal_id, acc_src, acc_dest, acc_valuation = self._get_accounting_data_for_valuation(cr, uid, move, context=ctx)
if location_to and location_to.usage == 'supplier':
#goods returned to supplier
self._create_account_move_line(cr, uid, quants, move, acc_valuation, acc_src, journal_id, context=ctx)
else:
self._create_account_move_line(cr, uid, quants, move, acc_valuation, acc_dest, journal_id, context=ctx)
def _quant_create(self, cr, uid, qty, move, lot_id=False, owner_id=False, src_package_id=False, dest_package_id=False, force_location_from=False, force_location_to=False, context=None):
quant = super(stock_quant, self)._quant_create(cr, uid, qty, move, lot_id=lot_id, owner_id=owner_id, src_package_id=src_package_id, dest_package_id=dest_package_id, force_location_from=force_location_from, force_location_to=force_location_to, context=context)
if move.product_id.valuation == 'real_time':
self._account_entry_move(cr, uid, [quant], move, context)
return quant
def move_quants_write(self, cr, uid, quants, move, location_dest_id, dest_package_id, context=None):
res = super(stock_quant, self).move_quants_write(cr, uid, quants, move, location_dest_id, dest_package_id, context=context)
if move.product_id.valuation == 'real_time':
self._account_entry_move(cr, uid, quants, move, context=context)
return res
def _get_accounting_data_for_valuation(self, cr, uid, move, context=None):
"""
Return the accounts and journal to use to post Journal Entries for the real-time
valuation of the quant.
:param context: context dictionary that can explicitly mention the company to consider via the 'force_company' key
:returns: journal_id, source account, destination account, valuation account
        :raise: osv.except_osv() if any mandatory account or journal is not defined.
"""
product_obj = self.pool.get('product.template')
accounts = product_obj.get_product_accounts(cr, uid, move.product_id.product_tmpl_id.id, context)
if move.location_id.valuation_out_account_id:
acc_src = move.location_id.valuation_out_account_id.id
else:
acc_src = accounts['stock_account_input']
if move.location_dest_id.valuation_in_account_id:
acc_dest = move.location_dest_id.valuation_in_account_id.id
else:
acc_dest = accounts['stock_account_output']
acc_valuation = accounts.get('property_stock_valuation_account_id', False)
journal_id = accounts['stock_journal']
return journal_id, acc_src, acc_dest, acc_valuation
def _prepare_account_move_line(self, cr, uid, move, qty, cost, credit_account_id, debit_account_id, context=None):
"""
Generate the account.move.line values to post to track the stock valuation difference due to the
processing of the given quant.
"""
if context is None:
context = {}
currency_obj = self.pool.get('res.currency')
if context.get('force_valuation_amount'):
valuation_amount = context.get('force_valuation_amount')
else:
if move.product_id.cost_method == 'average':
valuation_amount = cost if move.location_id.usage != 'internal' and move.location_dest_id.usage == 'internal' else move.product_id.standard_price
else:
valuation_amount = cost if move.product_id.cost_method == 'real' else move.product_id.standard_price
            #the standard_price of the product may be in another decimal precision, or not compatible with the rounding of
            #the company currency... so we need to use round() before creating the accounting entries.
valuation_amount = currency_obj.round(cr, uid, move.company_id.currency_id, valuation_amount * qty)
partner_id = (move.picking_id.partner_id and self.pool.get('res.partner')._find_accounting_partner(move.picking_id.partner_id).id) or False
debit_line_vals = {
'name': move.name,
'product_id': move.product_id.id,
'quantity': qty,
'product_uom_id': move.product_id.uom_id.id,
'ref': move.picking_id and move.picking_id.name or False,
'date': move.date,
'partner_id': partner_id,
'debit': valuation_amount > 0 and valuation_amount or 0,
'credit': valuation_amount < 0 and -valuation_amount or 0,
'account_id': debit_account_id,
}
credit_line_vals = {
'name': move.name,
'product_id': move.product_id.id,
'quantity': qty,
'product_uom_id': move.product_id.uom_id.id,
'ref': move.picking_id and move.picking_id.name or False,
'date': move.date,
'partner_id': partner_id,
'credit': valuation_amount > 0 and valuation_amount or 0,
'debit': valuation_amount < 0 and -valuation_amount or 0,
'account_id': credit_account_id,
}
return [(0, 0, debit_line_vals), (0, 0, credit_line_vals)]
def _create_account_move_line(self, cr, uid, quants, move, credit_account_id, debit_account_id, journal_id, context=None):
#group quants by cost
quant_cost_qty = {}
for quant in quants:
if quant_cost_qty.get(quant.cost):
quant_cost_qty[quant.cost] += quant.qty
else:
quant_cost_qty[quant.cost] = quant.qty
move_obj = self.pool.get('account.move')
for cost, qty in quant_cost_qty.items():
move_lines = self._prepare_account_move_line(cr, uid, move, qty, cost, credit_account_id, debit_account_id, context=context)
period_id = context.get('force_period', self.pool.get('account.period').find(cr, uid, context=context)[0])
move_obj.create(cr, uid, {'journal_id': journal_id,
'line_id': move_lines,
'period_id': period_id,
'date': fields.date.context_today(self, cr, uid, context=context),
'ref': move.picking_id.name}, context=context)
#def _reconcile_single_negative_quant(self, cr, uid, to_solve_quant, quant, quant_neg, qty, context=None):
# move = self._get_latest_move(cr, uid, to_solve_quant, context=context)
# quant_neg_position = quant_neg.negative_dest_location_id.usage
# remaining_solving_quant, remaining_to_solve_quant = super(stock_quant, self)._reconcile_single_negative_quant(cr, uid, to_solve_quant, quant, quant_neg, qty, context=context)
# #update the standard price of the product, only if we would have done it if we'd have had enough stock at first, which means
# #1) there isn't any negative quant anymore
# #2) the product cost's method is 'real'
# #3) we just fixed a negative quant caused by an outgoing shipment
# if not remaining_to_solve_quant and move.product_id.cost_method == 'real' and quant_neg_position != 'internal':
# self.pool.get('stock.move')._store_average_cost_price(cr, uid, move, context=context)
# return remaining_solving_quant, remaining_to_solve_quant
class stock_move(osv.osv):
_inherit = "stock.move"
def action_done(self, cr, uid, ids, context=None):
self.product_price_update_before_done(cr, uid, ids, context=context)
res = super(stock_move, self).action_done(cr, uid, ids, context=context)
self.product_price_update_after_done(cr, uid, ids, context=context)
return res
def _store_average_cost_price(self, cr, uid, move, context=None):
        ''' move is a browse record '''
product_obj = self.pool.get('product.product')
if any([q.qty <= 0 for q in move.quant_ids]):
#if there is a negative quant, the standard price shouldn't be updated
return
        #Note: here we can't store a quant.cost directly as we may have moved out 2 units (1 unit at 5€ and 1 unit at 7€) and in case of a product return of 1 unit, we can't know which of the 2 costs has to be used (5€ or 7€?). So at that time, thanks to the average valuation price we are storing, we will value it at 6€
average_valuation_price = 0.0
for q in move.quant_ids:
average_valuation_price += q.qty * q.cost
average_valuation_price = average_valuation_price / move.product_qty
# Write the standard price, as SUPERUSER_ID because a warehouse manager may not have the right to write on products
ctx = dict(context, force_company=move.company_id.id)
product_obj.write(cr, SUPERUSER_ID, [move.product_id.id], {'standard_price': average_valuation_price}, context=ctx)
self.write(cr, uid, [move.id], {'price_unit': average_valuation_price}, context=context)
def product_price_update_before_done(self, cr, uid, ids, context=None):
product_obj = self.pool.get('product.product')
tmpl_dict = {}
for move in self.browse(cr, uid, ids, context=context):
            #adapt standard price on incoming moves if the product cost_method is 'average'
if (move.location_id.usage == 'supplier') and (move.product_id.cost_method == 'average'):
product = move.product_id
prod_tmpl_id = move.product_id.product_tmpl_id.id
qty_available = move.product_id.product_tmpl_id.qty_available
if tmpl_dict.get(prod_tmpl_id):
product_avail = qty_available + tmpl_dict[prod_tmpl_id]
else:
tmpl_dict[prod_tmpl_id] = 0
product_avail = qty_available
if product_avail <= 0:
new_std_price = move.price_unit
else:
# Get the standard price
amount_unit = product.standard_price
new_std_price = ((amount_unit * product_avail) + (move.price_unit * move.product_qty)) / (product_avail + move.product_qty)
tmpl_dict[prod_tmpl_id] += move.product_qty
# Write the standard price, as SUPERUSER_ID because a warehouse manager may not have the right to write on products
ctx = dict(context or {}, force_company=move.company_id.id)
product_obj.write(cr, SUPERUSER_ID, [product.id], {'standard_price': new_std_price}, context=ctx)
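    # Worked example of the averaging above (illustration only): with 10 units
    # on hand valued at a standard price of 2.0 and an incoming move of 5 units
    # priced at 3.2, the new standard price is
    # ((2.0 * 10) + (3.2 * 5)) / (10 + 5) = 2.4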
def product_price_update_after_done(self, cr, uid, ids, context=None):
'''
This method adapts the price on the product when necessary
'''
for move in self.browse(cr, uid, ids, context=context):
#adapt standard price on outgoing moves if the product cost_method is 'real', so that a return
#or an inventory loss is made using the last value used for an outgoing valuation.
if move.product_id.cost_method == 'real' and move.location_dest_id.usage != 'internal':
#store the average price of the move on the move and product form
self._store_average_cost_price(cr, uid, move, context=context)
| agpl-3.0 |
sethshill/final | build/lib.linux-armv7l-2.7/bibliopixel/animation.py | 2 | 14970 | import time
import log
from led import LEDMatrix
from led import LEDStrip
from led import LEDCircle
import colors
from util import d
import threading
class animThread(threading.Thread):
def __init__(self, anim, args):
super(animThread, self).__init__()
self.setDaemon(True)
self._anim = anim
self._args = args
def run(self):
log.debug("Starting thread...")
self._anim._run(**self._args)
log.debug("Thread Complete")
class BaseAnimation(object):
def __init__(self, led):
self._led = led
self.animComplete = False
self._step = 0
self._timeRef = 0
self._internalDelay = None
self._sleep = None
self._threaded = False
self._thread = None
self._callback = None
self._stopEvent = threading.Event()
self._stopEvent.clear()
self._led._threadedAnim = False
self._free_run = False
def _msTime(self):
return time.time() * 1000.0
def preRun(self, amt=1):
self._led.all_off()
def preStep(self, amt=1):
pass
def postStep(self, amt=1):
pass
def step(self, amt=1):
raise RuntimeError("Base class step() called. This shouldn't happen")
def stopThread(self, wait=False):
if self._thread:
self._stopEvent.set()
if wait:
self._thread.join()
def __enter__(self):
return self
def _exit(self, type, value, traceback):
pass
def __exit__(self, type, value, traceback):
self._exit(type, value, traceback)
self.stopThread(wait=True)
self._led.all_off()
self._led.update()
self._led.waitForUpdate()
def cleanup(self):
return self.__exit__(None, None, None)
def stopped(self):
return not (self._thread and self._thread.isAlive())
def _run(self, amt, fps, sleep, max_steps, untilComplete, max_cycles, seconds):
self.preRun()
        # calculate sleep time based on the desired frames per second
if fps:
sleep = int(1000 / fps)
if seconds is not None:
max_steps = int((seconds * 1000) / sleep)
initSleep = sleep
self._step = 0
cur_step = 0
cycle_count = 0
self.animComplete = False
while (not self._stopEvent.isSet() and
((max_steps == 0 and not untilComplete) or
(max_steps > 0 and cur_step < max_steps) or
(max_steps == 0 and untilComplete and not self.animComplete))):
self._timeRef = self._msTime()
start = self._msTime()
if hasattr(self, "_input_dev"):
self._keys = self._input_dev.getKeys()
self.preStep(amt)
self.step(amt)
self.postStep(amt)
mid = self._msTime()
if self._free_run:
sleep = None
elif self._internalDelay:
sleep = self._internalDelay
elif initSleep:
sleep = initSleep
self._sleep = sleep
self._led._frameGenTime = int(mid - start)
self._led._frameTotalTime = sleep
self._led.update()
now = self._msTime()
if self.animComplete and max_cycles > 0:
if cycle_count < max_cycles - 1:
cycle_count += 1
self.animComplete = False
stepTime = int(mid - start)
if self._led._threadedUpdate:
updateTime = int(self._led.lastThreadedUpdate())
totalTime = updateTime
else:
updateTime = int(now - mid)
totalTime = stepTime + updateTime
if self._led._threadedUpdate:
log.debug(
"Frame: %sms / Update Max: %sms", stepTime, updateTime)
else:
log.debug("%sms/%sfps / Frame: %sms / Update: %sms",
totalTime, int(1000 / max(totalTime, 1)), stepTime, updateTime)
if sleep:
diff = (self._msTime() - self._timeRef)
t = max(0, (sleep - diff) / 1000.0)
if t == 0:
log.warning(
"Frame-time of %dms set, but took %dms!", sleep, diff)
if self._threaded:
self._stopEvent.wait(t)
else:
time.sleep(t)
cur_step += 1
self._exit(None, None, None)
if self._callback:
self._callback(self)
def run(self, amt=1, fps=None, sleep=None, max_steps=0, untilComplete=False, max_cycles=0, threaded=False, joinThread=False, callback=None, seconds=None):
self._led._threadedAnim = self._threaded = threaded
if self._threaded:
self._stopEvent.clear()
self._callback = callback
if self._threaded:
args = {}
l = locals()
run_params = ["amt", "fps", "sleep",
"max_steps", "untilComplete", "max_cycles", "seconds"]
for p in run_params:
if p in l:
args[p] = l[p]
self._thread = animThread(self, args)
self._thread.start()
if joinThread:
self._thread.join()
else:
self._run(amt, fps, sleep, max_steps, untilComplete, max_cycles, seconds)
RUN_PARAMS = [{
"id": "amt",
"label": "Step Amount",
"type": "int",
"min": 1,
"default": 1,
"help": "Amount to step animation by on each frame. May not be used on some animations."
}, {
"id": "fps",
"label": "Framerate",
"type": "int",
"default": 15,
"min": 1,
"help": "Framerate at which to run animation."
}, {
"id": "seconds",
"label": "Run Seconds",
"type": "int",
"default": None,
"min": 0,
"help": "Number of seconds to run animation for, based on framerate."
}, {
"id": "max_steps",
"label": "Max Frames",
"type": "int",
"min": 0,
"default": 0,
"help": "Total frames to run before stopping."
}, {
"id": "untilComplete",
"label": "Until Complete",
"type": "bool",
"default": False,
"help": "Run until animation marks itself as complete. If supported."
}, {
"id": "max_cycles",
"label": "Max Cycles",
"type": "int",
"min": 1,
"default": 1,
"help": "If Until Complete is set, animation will repeat this many times."
}, ]
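# Usage sketch for BaseAnimation.run() (hypothetical driver/led objects, not
# part of this module): with fps=30 the frame time is int(1000 / 30) ms, and
# seconds=10 translates into max_steps = int((10 * 1000) / sleep) frames.
#
#     anim = StripChannelTest(led)      # any BaseAnimation subclass
#     anim.run(fps=30, seconds=10)      # blocking run
#     anim.run(fps=30, threaded=True)   # or run on a background thread
#     anim.stopThread(wait=True)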
class OffAnim(BaseAnimation):
def __init__(self, led, timeout=10):
super(OffAnim, self).__init__(led)
self._internalDelay = timeout * 1000
def step(self, amt=1):
self._led.all_off()
class AnimationQueue(BaseAnimation):
def __init__(self, led, anims=None):
super(AnimationQueue, self).__init__(led)
self.anims = anims or []
self.curAnim = None
self.animIndex = 0
self._internalDelay = 0 # never wait
self.fps = None
self.untilComplete = False
# overriding to handle all the animations
def stopThread(self, wait=False):
for a, r in self.anims:
            # a bit of a hack: they aren't threaded, but this stops them anyway
a._stopEvent.set()
super(AnimationQueue, self).stopThread(wait)
def addAnim(self, anim, amt=1, fps=None, max_steps=0, untilComplete=False, max_cycles=0, seconds=None):
a = (
anim,
{
"amt": amt,
"fps": fps,
"max_steps": max_steps,
"untilComplete": untilComplete,
"max_cycles": max_cycles,
"seconds": seconds
}
)
self.anims.append(a)
def preRun(self, amt=1):
if len(self.anims) == 0:
raise Exception("Must provide at least one animation.")
self.animIndex = -1
def run(self, amt=1, fps=None, sleep=None, max_steps=0, untilComplete=False, max_cycles=0, threaded=False, joinThread=False, callback=None, seconds=None):
self.fps = fps
self.untilComplete = untilComplete
super(AnimationQueue, self).run(amt=1, fps=None, sleep=None, max_steps=0, untilComplete=untilComplete,
max_cycles=0, threaded=threaded, joinThread=joinThread, callback=callback, seconds=seconds)
def step(self, amt=1):
self.animIndex += 1
if self.animIndex >= len(self.anims):
if self.untilComplete:
self.animComplete = True
else:
self.animIndex = 0
if not self.animComplete:
self.curAnim = self.anims[self.animIndex]
anim, run = self.curAnim
run.update(threaded=False, joinThread=False, callback=None)
run['fps'] = run.get('fps') or self.fps
anim.run(**(run))
RUN_PARAMS = [{
"id": "fps",
"label": "Default Framerate",
"type": "int",
"default": None,
"min": 1,
"help": "Default framerate to run all animations in queue."
}, {
"id": "untilComplete",
"label": "Until Complete",
"type": "bool",
"default": False,
"help": "Run until animation marks itself as complete. If supported."
}]
class BaseStripAnim(BaseAnimation):
def __init__(self, led, start=0, end=-1):
super(BaseStripAnim, self).__init__(led)
if not isinstance(led, LEDStrip):
raise RuntimeError("Must use LEDStrip with Strip Animations!")
self._start = max(start, 0)
self._end = end
if self._end < 0 or self._end > self._led.lastIndex:
self._end = self._led.lastIndex
self._size = self._end - self._start + 1
class BaseMatrixAnim(BaseAnimation):
def __init__(self, led, width=0, height=0, startX=0, startY=0):
super(BaseMatrixAnim, self).__init__(led)
if not isinstance(led, LEDMatrix):
raise RuntimeError("Must use LEDMatrix with Matrix Animations!")
self.width = width or led.width
self.height = height or led.height
self.startX = startX
self.startY = startY
class BaseGameAnim(BaseMatrixAnim):
def __init__(self, led, inputDev):
super(BaseGameAnim, self).__init__(led)
self._input_dev = inputDev
self._keys = None
self._lastKeys = None
self._speedStep = 0
self._speeds = {}
self._keyfuncs = {}
def _exit(self, type, value, traceback):
if hasattr(self._input_dev, 'setLightsOff'):
self._input_dev.setLightsOff(5)
self._input_dev.close()
def setSpeed(self, name, speed):
self._speeds[name] = speed
def getSpeed(self, name):
return self._speeds.get(name)
def _checkSpeed(self, speed):
return not (self._speedStep % speed)
def checkSpeed(self, name):
return name in self._speeds and self._checkSpeed(self._speeds[name])
def addKeyFunc(self, key, func, speed=1, hold=True):
if not isinstance(key, list):
key = [key]
for k in key:
self._keyfuncs[k] = d({
"func": func,
"speed": speed,
"hold": hold,
"last": False,
"inter": False
})
def handleKeys(self):
kf = self._keyfuncs
for key in self._keys:
val = self._keys[key]
if key in kf:
cfg = kf[key]
speedPass = self._checkSpeed(cfg.speed)
if cfg.hold:
if speedPass:
if (val or cfg.inter):
cfg.func()
else:
cfg.inter = cfg.last = val
elif speedPass:
if (val or cfg.inter) and not cfg.last:
cfg.func()
cfg.inter = cfg.last = val
else:
cfg.inter |= val
self._lastKeys = self._keys
def preStep(self, amt):
pass
def postStep(self, amt):
self._speedStep += 1
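# Input-handling sketch for BaseGameAnim subclasses (hypothetical key name and
# handler; the available key names depend on the input device):
#
#     class MyGame(BaseGameAnim):
#         def __init__(self, led, inputDev):
#             super(MyGame, self).__init__(led, inputDev)
#             self.addKeyFunc("FIRE", self.fire, speed=1, hold=False)
#
#         def fire(self):
#             pass  # react to a key press
#
#         def step(self, amt=1):
#             self.handleKeys()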
class BaseCircleAnim(BaseAnimation):
def __init__(self, led):
super(BaseCircleAnim, self).__init__(led)
if not isinstance(led, LEDCircle):
raise RuntimeError("Must use LEDCircle with Circle Animations!")
self.rings = led.rings
self.ringCount = led.ringCount
self.lastRing = led.lastRing
self.ringSteps = led.ringSteps
class StripChannelTest(BaseStripAnim):
def __init__(self, led):
super(StripChannelTest, self).__init__(led)
self._internalDelay = 500
self.colors = [colors.Red, colors.Green, colors.Blue, colors.White]
def step(self, amt=1):
self._led.set(0, colors.Red)
self._led.set(1, colors.Green)
self._led.set(2, colors.Green)
self._led.set(3, colors.Blue)
self._led.set(4, colors.Blue)
self._led.set(5, colors.Blue)
color = self._step % 4
self._led.fill(self.colors[color], 7, 9)
self._step += 1
class MatrixChannelTest(BaseMatrixAnim):
def __init__(self, led):
super(MatrixChannelTest, self).__init__(led, 0, 0)
self._internalDelay = 500
self.colors = [colors.Red, colors.Green, colors.Blue, colors.White]
def step(self, amt=1):
self._led.drawLine(0, 0, 0, self.height - 1, colors.Red)
self._led.drawLine(1, 0, 1, self.height - 1, colors.Green)
self._led.drawLine(2, 0, 2, self.height - 1, colors.Green)
self._led.drawLine(3, 0, 3, self.height - 1, colors.Blue)
self._led.drawLine(4, 0, 4, self.height - 1, colors.Blue)
self._led.drawLine(5, 0, 5, self.height - 1, colors.Blue)
color = self._step % 4
self._led.fillRect(7, 0, 3, self.height, self.colors[color])
self._step += 1
class MatrixCalibrationTest(BaseMatrixAnim):
def __init__(self, led):
super(MatrixCalibrationTest, self).__init__(led, 0, 0)
self._internalDelay = 500
self.colors = [colors.Red, colors.Green, colors.Green,
colors.Blue, colors.Blue, colors.Blue]
def step(self, amt=1):
self._led.all_off()
i = self._step % self.width
for x in range(i + 1):
c = self.colors[x % len(self.colors)]
self._led.drawLine(x, 0, x, i, c)
self.animComplete = (i == (self.width - 1))
self._step += 1
| mit |
protatremy/buildbot | master/buildbot/test/unit/test_scripts_upgrade_master.py | 10 | 8417 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import mock
from twisted.internet import defer
from twisted.python.compat import NativeStringIO
from twisted.trial import unittest
from buildbot import config as config_module
from buildbot.db import connector
from buildbot.db import masters
from buildbot.db import model
from buildbot.scripts import base
from buildbot.scripts import upgrade_master
from buildbot.test.util import dirs
from buildbot.test.util import misc
from buildbot.test.util import www
def mkconfig(**kwargs):
config = dict(quiet=False, replace=False, basedir='test')
config.update(kwargs)
return config
class TestUpgradeMaster(dirs.DirsMixin, misc.StdoutAssertionsMixin,
unittest.TestCase):
def setUp(self):
        # upgradeMaster is decorated with @in_reactor, so strip that decoration
        # since the reactor is already running
self.patch(upgrade_master, 'upgradeMaster',
upgrade_master.upgradeMaster._orig)
self.setUpDirs('test')
self.setUpStdoutAssertions()
def patchFunctions(self, basedirOk=True, configOk=True):
self.calls = []
def checkBasedir(config):
self.calls.append('checkBasedir')
return basedirOk
self.patch(base, 'checkBasedir', checkBasedir)
def loadConfig(config, configFileName='master.cfg'):
self.calls.append('loadConfig')
return config_module.MasterConfig() if configOk else False
self.patch(base, 'loadConfig', loadConfig)
def upgradeFiles(config):
self.calls.append('upgradeFiles')
self.patch(upgrade_master, 'upgradeFiles', upgradeFiles)
def upgradeDatabase(config, master_cfg):
self.assertIsInstance(master_cfg, config_module.MasterConfig)
self.calls.append('upgradeDatabase')
self.patch(upgrade_master, 'upgradeDatabase', upgradeDatabase)
# tests
def test_upgradeMaster_success(self):
self.patchFunctions()
d = upgrade_master.upgradeMaster(mkconfig(), _noMonkey=True)
@d.addCallback
def check(rv):
self.assertEqual(rv, 0)
self.assertInStdout('upgrade complete')
return d
def test_upgradeMaster_quiet(self):
self.patchFunctions()
d = upgrade_master.upgradeMaster(mkconfig(quiet=True), _noMonkey=True)
@d.addCallback
def check(rv):
self.assertEqual(rv, 0)
self.assertWasQuiet()
return d
def test_upgradeMaster_bad_basedir(self):
self.patchFunctions(basedirOk=False)
d = upgrade_master.upgradeMaster(mkconfig(), _noMonkey=True)
@d.addCallback
def check(rv):
self.assertEqual(rv, 1)
return d
def test_upgradeMaster_bad_config(self):
self.patchFunctions(configOk=False)
d = upgrade_master.upgradeMaster(mkconfig(), _noMonkey=True)
@d.addCallback
def check(rv):
self.assertEqual(rv, 1)
return d
class TestUpgradeMasterFunctions(www.WwwTestMixin, dirs.DirsMixin,
misc.StdoutAssertionsMixin, unittest.TestCase):
def setUp(self):
self.setUpDirs('test')
self.basedir = os.path.abspath(os.path.join('test', 'basedir'))
self.setUpStdoutAssertions()
def tearDown(self):
self.tearDownDirs()
def writeFile(self, path, contents):
with open(path, 'wt') as f:
f.write(contents)
def readFile(self, path):
with open(path, 'rt') as f:
return f.read()
# tests
def test_installFile(self):
self.writeFile('test/srcfile', 'source data')
upgrade_master.installFile(mkconfig(), 'test/destfile', 'test/srcfile')
self.assertEqual(self.readFile('test/destfile'), 'source data')
self.assertInStdout('creating test/destfile')
def test_installFile_existing_differing(self):
self.writeFile('test/srcfile', 'source data')
self.writeFile('test/destfile', 'dest data')
upgrade_master.installFile(mkconfig(), 'test/destfile', 'test/srcfile')
self.assertEqual(self.readFile('test/destfile'), 'dest data')
self.assertEqual(self.readFile('test/destfile.new'), 'source data')
self.assertInStdout('writing new contents to')
def test_installFile_existing_differing_overwrite(self):
self.writeFile('test/srcfile', 'source data')
self.writeFile('test/destfile', 'dest data')
upgrade_master.installFile(mkconfig(), 'test/destfile', 'test/srcfile',
overwrite=True)
self.assertEqual(self.readFile('test/destfile'), 'source data')
self.assertFalse(os.path.exists('test/destfile.new'))
self.assertInStdout('overwriting')
def test_installFile_existing_same(self):
self.writeFile('test/srcfile', 'source data')
self.writeFile('test/destfile', 'source data')
upgrade_master.installFile(mkconfig(), 'test/destfile', 'test/srcfile')
self.assertEqual(self.readFile('test/destfile'), 'source data')
self.assertFalse(os.path.exists('test/destfile.new'))
self.assertWasQuiet()
def test_installFile_quiet(self):
self.writeFile('test/srcfile', 'source data')
upgrade_master.installFile(mkconfig(quiet=True), 'test/destfile',
'test/srcfile')
self.assertWasQuiet()
def test_upgradeFiles(self):
upgrade_master.upgradeFiles(mkconfig())
for f in [
'test/master.cfg.sample',
]:
self.assertTrue(os.path.exists(f), "%s not found" % f)
self.assertInStdout('upgrading basedir')
def test_upgradeFiles_notice_about_unused_public_html(self):
os.mkdir('test/public_html')
self.writeFile('test/public_html/index.html', 'INDEX')
upgrade_master.upgradeFiles(mkconfig())
self.assertInStdout('public_html is not used')
@defer.inlineCallbacks
def test_upgradeDatabase(self):
setup = mock.Mock(side_effect=lambda **kwargs: defer.succeed(None))
self.patch(connector.DBConnector, 'setup', setup)
upgrade = mock.Mock(side_effect=lambda **kwargs: defer.succeed(None))
self.patch(model.Model, 'upgrade', upgrade)
setAllMastersActiveLongTimeAgo = mock.Mock(
side_effect=lambda **kwargs: defer.succeed(None))
self.patch(masters.MastersConnectorComponent,
'setAllMastersActiveLongTimeAgo', setAllMastersActiveLongTimeAgo)
yield upgrade_master.upgradeDatabase(
mkconfig(basedir='test', quiet=True),
config_module.MasterConfig())
        setup.assert_called_with(check_version=False, verbose=False)
upgrade.assert_called_with()
self.assertWasQuiet()
@defer.inlineCallbacks
def test_upgradeDatabaseFail(self):
setup = mock.Mock(side_effect=lambda **kwargs: defer.succeed(None))
self.patch(connector.DBConnector, 'setup', setup)
self.patch(sys, 'stderr', NativeStringIO())
upgrade = mock.Mock(
side_effect=lambda **kwargs: defer.fail(Exception("o noz")))
self.patch(model.Model, 'upgrade', upgrade)
ret = yield upgrade_master._upgradeMaster(
mkconfig(basedir='test', quiet=True),
config_module.MasterConfig())
self.assertEqual(ret, 1)
self.assertIn("problem while upgrading!:\nTraceback (most recent call last):\n",
sys.stderr.getvalue())
self.assertIn("o noz", sys.stderr.getvalue())
| gpl-2.0 |
tsl143/addons-server | src/olympia/zadmin/management/commands/addusertogroup.py | 2 | 1528 | from django.core.management.base import BaseCommand, CommandError
from django.db import IntegrityError
import olympia.core.logger
from olympia.access.models import Group, GroupUser
from olympia.users.models import UserProfile
class Command(BaseCommand):
help = 'Add a new user to a group.'
log = olympia.core.logger.getLogger('z.users')
def add_arguments(self, parser):
parser.add_argument('user', type=unicode, help='User id or email')
parser.add_argument('group_id', type=int, help='Group id')
def handle(self, *args, **options):
do_adduser(options['user'], options['group_id'])
msg = 'Adding {user} to {group}\n'.format(
user=options['user'], group=options['group_id'])
self.log.info(msg)
self.stdout.write(msg)
def do_adduser(user, group):
try:
if '@' in user:
user = UserProfile.objects.get(email=user)
elif user.isdigit():
user = UserProfile.objects.get(pk=user)
else:
raise CommandError('Unknown input for user.')
group = Group.objects.get(pk=group)
GroupUser.objects.create(user=user, group=group)
except IntegrityError, e:
raise CommandError('User is already in that group? %s' % e)
except UserProfile.DoesNotExist:
raise CommandError('User ({user}) does not exist.'.format(user=user))
except Group.DoesNotExist:
raise CommandError('Group ({group}) does not exist.'
.format(group=group))
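# Example invocation (hypothetical email and group id), run through the
# project's manage.py entry point:
#
#     ./manage.py addusertogroup someone@example.com 3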
| bsd-3-clause |
gnowxilef/youtube-dl | youtube_dl/extractor/tmz.py | 65 | 2138 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
class TMZIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?tmz\.com/videos/(?P<id>[^/?#]+)'
_TESTS = [{
'url': 'http://www.tmz.com/videos/0_okj015ty/',
'md5': '4d22a51ef205b6c06395d8394f72d560',
'info_dict': {
'id': '0_okj015ty',
'ext': 'mp4',
'title': 'Kim Kardashian\'s Boobs Unlock a Mystery!',
'description': 'Did Kim Kardasain try to one-up Khloe by one-upping Kylie??? Or is she just showing off her amazing boobs?',
'timestamp': 1394747163,
'uploader_id': 'batchUser',
'upload_date': '20140313',
}
}, {
'url': 'http://www.tmz.com/videos/0-cegprt2p/',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url).replace('-', '_')
return self.url_result('kaltura:591531:%s' % video_id, 'Kaltura', video_id)
class TMZArticleIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?tmz\.com/\d{4}/\d{2}/\d{2}/(?P<id>[^/]+)/?'
_TEST = {
'url': 'http://www.tmz.com/2015/04/19/bobby-brown-bobbi-kristina-awake-video-concert',
'md5': '3316ff838ae5bb7f642537825e1e90d2',
'info_dict': {
'id': '0_6snoelag',
'ext': 'mov',
'title': 'Bobby Brown Tells Crowd ... Bobbi Kristina is Awake',
'description': 'Bobby Brown stunned his audience during a concert Saturday night, when he told the crowd, "Bobbi is awake. She\'s watching me."',
'timestamp': 1429467813,
'upload_date': '20150419',
'uploader_id': 'batchUser',
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
embedded_video_info = self._parse_json(self._html_search_regex(
r'tmzVideoEmbed\(({.+?})\);', webpage, 'embedded video info'),
video_id)
return self.url_result(
'http://www.tmz.com/videos/%s/' % embedded_video_info['id'])
| unlicense |
BaichuanWu/Blog_on_django | site-packages/django/contrib/gis/tests/distapp/tests.py | 54 | 19622 | from __future__ import unicode_literals
from unittest import skipUnless
from django.db import connection
from django.db.models import Q
from django.contrib.gis.geos import HAS_GEOS
from django.contrib.gis.measure import D # alias for Distance
from django.contrib.gis.tests.utils import (
HAS_SPATIAL_DB, mysql, oracle, postgis, spatialite, no_oracle, no_spatialite
)
from django.test import TestCase
if HAS_GEOS and HAS_SPATIAL_DB:
from django.contrib.gis.geos import GEOSGeometry, LineString
from .models import (AustraliaCity, Interstate, SouthTexasInterstate,
SouthTexasCity, SouthTexasCityFt, CensusZipcode, SouthTexasZipcode)
@skipUnless(HAS_GEOS and HAS_SPATIAL_DB and not mysql,
"Geos and spatial db (not mysql) are required.")
class DistanceTest(TestCase):
if HAS_GEOS and HAS_SPATIAL_DB:
# A point we are testing distances with -- using a WGS84
# coordinate that'll be implicitly transformed to that to
# the coordinate system of the field, EPSG:32140 (Texas South Central
# w/units in meters)
stx_pnt = GEOSGeometry('POINT (-95.370401017314293 29.704867409475465)', 4326)
# Another one for Australia
au_pnt = GEOSGeometry('POINT (150.791 -34.4919)', 4326)
def get_names(self, qs):
cities = [c.name for c in qs]
cities.sort()
return cities
def test_init(self):
"""
Test initialization of distance models.
"""
self.assertEqual(9, SouthTexasCity.objects.count())
self.assertEqual(9, SouthTexasCityFt.objects.count())
self.assertEqual(11, AustraliaCity.objects.count())
self.assertEqual(4, SouthTexasZipcode.objects.count())
self.assertEqual(4, CensusZipcode.objects.count())
self.assertEqual(1, Interstate.objects.count())
self.assertEqual(1, SouthTexasInterstate.objects.count())
@no_spatialite
def test_dwithin(self):
"""
Test the `dwithin` lookup type.
"""
# Distances -- all should be equal (except for the
# degree/meter pair in au_cities, that's somewhat
# approximate).
tx_dists = [(7000, 22965.83), D(km=7), D(mi=4.349)]
au_dists = [(0.5, 32000), D(km=32), D(mi=19.884)]
# Expected cities for Australia and Texas.
tx_cities = ['Downtown Houston', 'Southside Place']
au_cities = ['Mittagong', 'Shellharbour', 'Thirroul', 'Wollongong']
# Performing distance queries on two projected coordinate systems one
# with units in meters and the other in units of U.S. survey feet.
for dist in tx_dists:
if isinstance(dist, tuple):
dist1, dist2 = dist
else:
dist1 = dist2 = dist
qs1 = SouthTexasCity.objects.filter(point__dwithin=(self.stx_pnt, dist1))
qs2 = SouthTexasCityFt.objects.filter(point__dwithin=(self.stx_pnt, dist2))
for qs in qs1, qs2:
self.assertEqual(tx_cities, self.get_names(qs))
# Now performing the `dwithin` queries on a geodetic coordinate system.
for dist in au_dists:
if isinstance(dist, D) and not oracle:
type_error = True
else:
type_error = False
if isinstance(dist, tuple):
if oracle:
dist = dist[1]
else:
dist = dist[0]
# Creating the query set.
qs = AustraliaCity.objects.order_by('name')
if type_error:
# A ValueError should be raised on PostGIS when trying to pass
# Distance objects into a DWithin query using a geodetic field.
self.assertRaises(ValueError, AustraliaCity.objects.filter(point__dwithin=(self.au_pnt, dist)).count)
else:
self.assertListEqual(au_cities, self.get_names(qs.filter(point__dwithin=(self.au_pnt, dist))))
def test_distance_projected(self):
"""
Test the `distance` GeoQuerySet method on projected coordinate systems.
"""
# The point for La Grange, TX
lagrange = GEOSGeometry('POINT(-96.876369 29.905320)', 4326)
# Reference distances in feet and in meters. Got these values from
# using the provided raw SQL statements.
# SELECT ST_Distance(point, ST_Transform(ST_GeomFromText('POINT(-96.876369 29.905320)', 4326), 32140)) FROM distapp_southtexascity;
m_distances = [147075.069813, 139630.198056, 140888.552826,
138809.684197, 158309.246259, 212183.594374,
70870.188967, 165337.758878, 139196.085105]
# SELECT ST_Distance(point, ST_Transform(ST_GeomFromText('POINT(-96.876369 29.905320)', 4326), 2278)) FROM distapp_southtexascityft;
# Oracle 11 thinks this is not a projected coordinate system, so it's
# not tested.
ft_distances = [482528.79154625, 458103.408123001, 462231.860397575,
455411.438904354, 519386.252102563, 696139.009211594,
232513.278304279, 542445.630586414, 456679.155883207]
# Testing using different variations of parameters and using models
# with different projected coordinate systems.
dist1 = SouthTexasCity.objects.distance(lagrange, field_name='point')
dist2 = SouthTexasCity.objects.distance(lagrange) # Using GEOSGeometry parameter
if spatialite or oracle:
dist_qs = [dist1, dist2]
else:
dist3 = SouthTexasCityFt.objects.distance(lagrange.ewkt) # Using EWKT string parameter.
dist4 = SouthTexasCityFt.objects.distance(lagrange)
dist_qs = [dist1, dist2, dist3, dist4]
# Original query done on PostGIS, have to adjust AlmostEqual tolerance
# for Oracle.
tol = 2 if oracle else 5
# Ensuring expected distances are returned for each distance queryset.
for qs in dist_qs:
for i, c in enumerate(qs):
self.assertAlmostEqual(m_distances[i], c.distance.m, tol)
self.assertAlmostEqual(ft_distances[i], c.distance.survey_ft, tol)
@no_spatialite
def test_distance_geodetic(self):
"""
Test the `distance` GeoQuerySet method on geodetic coordinate systems.
"""
tol = 2 if oracle else 5
# Testing geodetic distance calculation with a non-point geometry
# (a LineString of Wollongong and Shellharbour coords).
ls = LineString(((150.902, -34.4245), (150.87, -34.5789)))
if oracle or connection.ops.geography:
# Reference query:
# SELECT ST_distance_sphere(point, ST_GeomFromText('LINESTRING(150.9020 -34.4245,150.8700 -34.5789)', 4326)) FROM distapp_australiacity ORDER BY name;
distances = [1120954.92533513, 140575.720018241, 640396.662906304,
60580.9693849269, 972807.955955075, 568451.8357838,
40435.4335201384, 0, 68272.3896586844, 12375.0643697706, 0]
qs = AustraliaCity.objects.distance(ls).order_by('name')
for city, distance in zip(qs, distances):
# Testing equivalence to within a meter.
self.assertAlmostEqual(distance, city.distance.m, 0)
else:
            # PostGIS 1.4 and below is limited to distance queries only
            # to/from point geometries, so check for raising of ValueError.
self.assertRaises(ValueError, AustraliaCity.objects.distance, ls)
self.assertRaises(ValueError, AustraliaCity.objects.distance, ls.wkt)
# Got the reference distances using the raw SQL statements:
# SELECT ST_distance_spheroid(point, ST_GeomFromText('POINT(151.231341 -33.952685)', 4326), 'SPHEROID["WGS 84",6378137.0,298.257223563]') FROM distapp_australiacity WHERE (NOT (id = 11));
# SELECT ST_distance_sphere(point, ST_GeomFromText('POINT(151.231341 -33.952685)', 4326)) FROM distapp_australiacity WHERE (NOT (id = 11)); st_distance_sphere
if connection.ops.postgis and connection.ops.proj_version_tuple() >= (4, 7, 0):
# PROJ.4 versions 4.7+ have updated datums, and thus different
# distance values.
spheroid_distances = [60504.0628957201, 77023.9489850262, 49154.8867574404,
90847.4358768573, 217402.811919332, 709599.234564757,
640011.483550888, 7772.00667991925, 1047861.78619339,
1165126.55236034]
sphere_distances = [60580.9693849267, 77144.0435286473, 49199.4415344719,
90804.7533823494, 217713.384600405, 709134.127242793,
639828.157159169, 7786.82949717788, 1049204.06569028,
1162623.7238134]
else:
spheroid_distances = [60504.0628825298, 77023.948962654, 49154.8867507115,
90847.435881812, 217402.811862568, 709599.234619957,
640011.483583758, 7772.00667666425, 1047861.7859506,
1165126.55237647]
sphere_distances = [60580.7612632291, 77143.7785056615, 49199.2725132184,
90804.4414289463, 217712.63666124, 709131.691061906,
639825.959074112, 7786.80274606706, 1049200.46122281,
1162619.7297006]
# Testing with spheroid distances first.
hillsdale = AustraliaCity.objects.get(name='Hillsdale')
qs = AustraliaCity.objects.exclude(id=hillsdale.id).distance(hillsdale.point, spheroid=True)
for i, c in enumerate(qs):
self.assertAlmostEqual(spheroid_distances[i], c.distance.m, tol)
if postgis:
# PostGIS uses sphere-only distances by default, testing these as well.
qs = AustraliaCity.objects.exclude(id=hillsdale.id).distance(hillsdale.point)
for i, c in enumerate(qs):
self.assertAlmostEqual(sphere_distances[i], c.distance.m, tol)
@no_oracle # Oracle already handles geographic distance calculation.
def test_distance_transform(self):
"""
Test the `distance` GeoQuerySet method used with `transform` on a geographic field.
"""
# Normally you can't compute distances from a geometry field
# that is not a PointField (on PostGIS 1.4 and below).
if not connection.ops.geography:
self.assertRaises(ValueError, CensusZipcode.objects.distance, self.stx_pnt)
# We'll be using a Polygon (created by buffering the centroid
# of 77005 to 100m) -- which aren't allowed in geographic distance
# queries normally, however our field has been transformed to
# a non-geographic system.
z = SouthTexasZipcode.objects.get(name='77005')
# Reference query:
# SELECT ST_Distance(ST_Transform("distapp_censuszipcode"."poly", 32140), ST_GeomFromText('<buffer_wkt>', 32140)) FROM "distapp_censuszipcode";
dists_m = [3553.30384972258, 1243.18391525602, 2186.15439472242]
# Having our buffer in the SRID of the transformation and of the field
# -- should get the same results. The first buffer has no need for
# transformation SQL because it is the same SRID as what was given
# to `transform()`. The second buffer will need to be transformed,
# however.
buf1 = z.poly.centroid.buffer(100)
buf2 = buf1.transform(4269, clone=True)
ref_zips = ['77002', '77025', '77401']
for buf in [buf1, buf2]:
qs = CensusZipcode.objects.exclude(name='77005').transform(32140).distance(buf)
self.assertListEqual(ref_zips, self.get_names(qs))
for i, z in enumerate(qs):
self.assertAlmostEqual(z.distance.m, dists_m[i], 5)
def test_distance_lookups(self):
"""
Test the `distance_lt`, `distance_gt`, `distance_lte`, and `distance_gte` lookup types.
"""
# Retrieving the cities within a 20km 'donut' w/a 7km radius 'hole'
# (thus, Houston and Southside place will be excluded as tested in
# the `test02_dwithin` above).
qs1 = SouthTexasCity.objects.filter(point__distance_gte=(self.stx_pnt, D(km=7))).filter(point__distance_lte=(self.stx_pnt, D(km=20)))
# Can't determine the units on SpatiaLite from PROJ.4 string, and
# Oracle 11 incorrectly thinks it is not projected.
if spatialite or oracle:
dist_qs = (qs1,)
else:
qs2 = SouthTexasCityFt.objects.filter(point__distance_gte=(self.stx_pnt, D(km=7))).filter(point__distance_lte=(self.stx_pnt, D(km=20)))
dist_qs = (qs1, qs2)
for qs in dist_qs:
cities = self.get_names(qs)
self.assertEqual(cities, ['Bellaire', 'Pearland', 'West University Place'])
# Doing a distance query using Polygons instead of a Point.
z = SouthTexasZipcode.objects.get(name='77005')
qs = SouthTexasZipcode.objects.exclude(name='77005').filter(poly__distance_lte=(z.poly, D(m=275)))
self.assertEqual(['77025', '77401'], self.get_names(qs))
# If we add a little more distance 77002 should be included.
qs = SouthTexasZipcode.objects.exclude(name='77005').filter(poly__distance_lte=(z.poly, D(m=300)))
self.assertEqual(['77002', '77025', '77401'], self.get_names(qs))
def test_geodetic_distance_lookups(self):
"""
Test distance lookups on geodetic coordinate systems.
"""
# Line is from Canberra to Sydney. Query is for all other cities within
# a 100km of that line (which should exclude only Hobart & Adelaide).
line = GEOSGeometry('LINESTRING(144.9630 -37.8143,151.2607 -33.8870)', 4326)
dist_qs = AustraliaCity.objects.filter(point__distance_lte=(line, D(km=100)))
if oracle or connection.ops.geography:
# Oracle and PostGIS 1.5 can do distance lookups on arbitrary geometries.
self.assertEqual(9, dist_qs.count())
self.assertEqual(['Batemans Bay', 'Canberra', 'Hillsdale',
'Melbourne', 'Mittagong', 'Shellharbour',
'Sydney', 'Thirroul', 'Wollongong'],
self.get_names(dist_qs))
else:
# PostGIS 1.4 and below only allows geodetic distance queries (utilizing
# ST_Distance_Sphere/ST_Distance_Spheroid) from Points to PointFields
# on geometry columns.
self.assertRaises(ValueError, dist_qs.count)
            # Ensured that a ValueError was raised; none of the rest of the test is
            # supported on this backend, so bail now.
if spatialite:
return
# Too many params (4 in this case) should raise a ValueError.
self.assertRaises(ValueError, len,
AustraliaCity.objects.filter(point__distance_lte=('POINT(5 23)', D(km=100), 'spheroid', '4')))
# Not enough params should raise a ValueError.
self.assertRaises(ValueError, len,
AustraliaCity.objects.filter(point__distance_lte=('POINT(5 23)',)))
# Getting all cities w/in 550 miles of Hobart.
hobart = AustraliaCity.objects.get(name='Hobart')
qs = AustraliaCity.objects.exclude(name='Hobart').filter(point__distance_lte=(hobart.point, D(mi=550)))
cities = self.get_names(qs)
self.assertEqual(cities, ['Batemans Bay', 'Canberra', 'Melbourne'])
# Cities that are either really close or really far from Wollongong --
# and using different units of distance.
wollongong = AustraliaCity.objects.get(name='Wollongong')
d1, d2 = D(yd=19500), D(nm=400) # Yards (~17km) & Nautical miles.
        # Normal geodetic distance lookup (uses `distance_sphere` on PostGIS).
gq1 = Q(point__distance_lte=(wollongong.point, d1))
gq2 = Q(point__distance_gte=(wollongong.point, d2))
qs1 = AustraliaCity.objects.exclude(name='Wollongong').filter(gq1 | gq2)
# Geodetic distance lookup but telling GeoDjango to use `distance_spheroid`
# instead (we should get the same results b/c accuracy variance won't matter
# in this test case).
if postgis:
gq3 = Q(point__distance_lte=(wollongong.point, d1, 'spheroid'))
gq4 = Q(point__distance_gte=(wollongong.point, d2, 'spheroid'))
qs2 = AustraliaCity.objects.exclude(name='Wollongong').filter(gq3 | gq4)
querysets = [qs1, qs2]
else:
querysets = [qs1]
for qs in querysets:
cities = self.get_names(qs)
self.assertEqual(cities, ['Adelaide', 'Hobart', 'Shellharbour', 'Thirroul'])
def test_area(self):
"""
Test the `area` GeoQuerySet method.
"""
# Reference queries:
# SELECT ST_Area(poly) FROM distapp_southtexaszipcode;
area_sq_m = [5437908.90234375, 10183031.4389648, 11254471.0073242, 9881708.91772461]
# Tolerance has to be lower for Oracle and differences
# with GEOS 3.0.0RC4
tol = 2
for i, z in enumerate(SouthTexasZipcode.objects.area()):
self.assertAlmostEqual(area_sq_m[i], z.area.sq_m, tol)
def test_length(self):
"""
Test the `length` GeoQuerySet method.
"""
# Reference query (should use `length_spheroid`).
# SELECT ST_length_spheroid(ST_GeomFromText('<wkt>', 4326) 'SPHEROID["WGS 84",6378137,298.257223563, AUTHORITY["EPSG","7030"]]');
len_m1 = 473504.769553813
len_m2 = 4617.668
if spatialite:
# Does not support geodetic coordinate systems.
self.assertRaises(ValueError, Interstate.objects.length)
else:
qs = Interstate.objects.length()
tol = 2 if oracle else 3
self.assertAlmostEqual(len_m1, qs[0].length.m, tol)
# Now doing length on a projected coordinate system.
i10 = SouthTexasInterstate.objects.length().get(name='I-10')
self.assertAlmostEqual(len_m2, i10.length.m, 2)
@no_spatialite
def test_perimeter(self):
"""
Test the `perimeter` GeoQuerySet method.
"""
# Reference query:
# SELECT ST_Perimeter(distapp_southtexaszipcode.poly) FROM distapp_southtexaszipcode;
perim_m = [18404.3550889361, 15627.2108551001, 20632.5588368978, 17094.5996143697]
tol = 2 if oracle else 7
for i, z in enumerate(SouthTexasZipcode.objects.perimeter()):
self.assertAlmostEqual(perim_m[i], z.perimeter.m, tol)
# Running on points; should return 0.
for i, c in enumerate(SouthTexasCity.objects.perimeter(model_att='perim')):
self.assertEqual(0, c.perim.m)
def test_measurement_null_fields(self):
"""
Test the measurement GeoQuerySet methods on fields with NULL values.
"""
# Creating SouthTexasZipcode w/NULL value.
SouthTexasZipcode.objects.create(name='78212')
# Performing distance/area queries against the NULL PolygonField,
# and ensuring the result of the operations is None.
htown = SouthTexasCity.objects.get(name='Downtown Houston')
z = SouthTexasZipcode.objects.distance(htown.point).area().get(name='78212')
self.assertIsNone(z.distance)
self.assertIsNone(z.area)
| mit |
ivanvladimir/gensim | gensim/corpora/ucicorpus.py | 68 | 7517 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012 Jonathan Esterhazy <jonathan.esterhazy at gmail.com>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
University of California, Irvine (UCI) Bag-of-Words format.
http://archive.ics.uci.edu/ml/datasets/Bag+of+Words
"""
from __future__ import with_statement
import logging
from collections import defaultdict
from gensim import utils
from gensim.corpora import Dictionary
from gensim.corpora import IndexedCorpus
from gensim.matutils import MmReader
from gensim.matutils import MmWriter
from six import iteritems, string_types
from six.moves import xrange
logger = logging.getLogger('gensim.corpora.ucicorpus')
class UciReader(MmReader):
def __init__(self, input):
"""
Initialize the reader.
The `input` parameter refers to a file on the local filesystem,
which is expected to be in the UCI Bag-of-Words format.
"""
logger.info('Initializing corpus reader from %s' % input)
self.input = input
with utils.smart_open(self.input) as fin:
self.num_docs = self.num_terms = self.num_nnz = 0
try:
self.num_docs = int(next(fin).strip())
self.num_terms = int(next(fin).strip())
self.num_nnz = int(next(fin).strip())
except StopIteration:
pass
logger.info('accepted corpus with %i documents, %i features, %i non-zero entries' %
(self.num_docs, self.num_terms, self.num_nnz))
def skip_headers(self, input_file):
for lineno, _ in enumerate(input_file):
if lineno == 2:
break
# endclass UciReader
class UciWriter(MmWriter):
"""
Store a corpus in UCI Bag-of-Words format.
This corpus format is identical to MM format, except for
different file headers. There is no format line, and the first
three lines of the file contain number_docs, num_terms, and num_nnz,
one value per line.
This implementation is based on matutils.MmWriter, and works the same way.
"""
MAX_HEADER_LENGTH = 20 # reserve 20 bytes per header value
FAKE_HEADER = utils.to_utf8(' ' * MAX_HEADER_LENGTH + '\n')
def write_headers(self):
"""
Write blank header lines. Will be updated later, once corpus stats are known.
"""
for _ in range(3):
self.fout.write(self.FAKE_HEADER)
self.last_docno = -1
self.headers_written = True
def update_headers(self, num_docs, num_terms, num_nnz):
"""
Update headers with actual values.
"""
offset = 0
values = [utils.to_utf8(str(n)) for n in [num_docs, num_terms, num_nnz]]
for value in values:
if len(value) > len(self.FAKE_HEADER):
raise ValueError('Invalid header: value too large!')
self.fout.seek(offset)
self.fout.write(value)
offset += len(self.FAKE_HEADER)
@staticmethod
def write_corpus(fname, corpus, progress_cnt=1000, index=False):
writer = UciWriter(fname)
writer.write_headers()
num_terms, num_nnz = 0, 0
docno, poslast = -1, -1
offsets = []
for docno, bow in enumerate(corpus):
if docno % progress_cnt == 0:
logger.info("PROGRESS: saving document #%i" % docno)
if index:
posnow = writer.fout.tell()
if posnow == poslast:
offsets[-1] = -1
offsets.append(posnow)
poslast = posnow
vector = [(x, int(y)) for (x, y) in bow if int(y) != 0] # integer count, not floating weights
max_id, veclen = writer.write_vector(docno, vector)
num_terms = max(num_terms, 1 + max_id)
num_nnz += veclen
num_docs = docno + 1
if num_docs * num_terms != 0:
logger.info("saved %ix%i matrix, density=%.3f%% (%i/%i)" %
(num_docs, num_terms,
100.0 * num_nnz / (num_docs * num_terms),
num_nnz,
num_docs * num_terms))
# now write proper headers, by seeking and overwriting the spaces written earlier
writer.update_headers(num_docs, num_terms, num_nnz)
writer.close()
if index:
return offsets
# endclass UciWriter
class UciCorpus(UciReader, IndexedCorpus):
"""
Corpus in the UCI bag-of-words format.
"""
def __init__(self, fname, fname_vocab=None):
IndexedCorpus.__init__(self, fname)
UciReader.__init__(self, fname)
if fname_vocab is None:
fname_vocab = utils.smart_extension(fname, '.vocab')
self.fname = fname
with utils.smart_open(fname_vocab) as fin:
words = [word.strip() for word in fin]
self.id2word = dict(enumerate(words))
self.transposed = True
def __iter__(self):
"""
Interpret a matrix in UCI bag-of-words format as a streamed gensim corpus
(yielding one document at a time).
"""
for docId, doc in super(UciCorpus, self).__iter__():
yield doc # get rid of docId, return the sparse vector only
def create_dictionary(self):
"""
Utility method to generate gensim-style Dictionary directly from
the corpus and vocabulary data.
"""
dictionary = Dictionary()
# replace dfs with defaultdict to avoid downstream KeyErrors
# uci vocabularies may contain terms that are not used in the document data
dictionary.dfs = defaultdict(int)
dictionary.id2token = self.id2word
dictionary.token2id = dict((v, k) for k, v in iteritems(self.id2word))
dictionary.num_docs = self.num_docs
dictionary.num_nnz = self.num_nnz
for docno, doc in enumerate(self):
if docno % 10000 == 0:
logger.info('PROGRESS: processing document %i of %i' % (docno, self.num_docs))
for word, count in doc:
dictionary.dfs[word] += 1
dictionary.num_pos += count
return dictionary
@staticmethod
def save_corpus(fname, corpus, id2word=None, progress_cnt=10000, metadata=False):
"""
Save a corpus in the UCI Bag-of-Words format.
There are actually two files saved: `fname` and `fname.vocab`, where
`fname.vocab` is the vocabulary file.
This function is automatically called by `UciCorpus.serialize`; don't
call it directly, call `serialize` instead.
"""
if id2word is None:
logger.info("no word id mapping provided; initializing from corpus")
id2word = utils.dict_from_corpus(corpus)
num_terms = len(id2word)
else:
num_terms = 1 + max([-1] + id2word.keys())
# write out vocabulary
fname_vocab = utils.smart_extension(fname, '.vocab')
logger.info("saving vocabulary of %i words to %s" % (num_terms, fname_vocab))
with utils.smart_open(fname_vocab, 'wb') as fout:
for featureid in xrange(num_terms):
fout.write(utils.to_utf8("%s\n" % id2word.get(featureid, '---')))
logger.info("storing corpus in UCI Bag-of-Words format: %s" % fname)
return UciWriter.write_corpus(fname, corpus, index=True, progress_cnt=progress_cnt)
# endclass UciCorpus
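# Hedged usage sketch (not part of the original module); the file names below
# are hypothetical:
#
#     corpus = UciCorpus('docword.example.txt', 'vocab.example.txt')
#     dictionary = corpus.create_dictionary()
#     UciCorpus.serialize('/tmp/example_copy.uci', corpus)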
| gpl-3.0 |
AlexOugh/horizon | openstack_dashboard/dashboards/identity/roles/urls.py | 64 | 1070 | # Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls import patterns
from django.conf.urls import url
from openstack_dashboard.dashboards.identity.roles import views
urlpatterns = patterns(
'openstack_dashboard.dashboards.identity.roles.views',
url(r'^$', views.IndexView.as_view(), name='index'),
url(r'^(?P<role_id>[^/]+)/update/$',
views.UpdateView.as_view(), name='update'),
url(r'^create/$', views.CreateView.as_view(), name='create'))
| apache-2.0 |
opencb-cloud/opencga-lib | opencga-cloud/analysis/samtools-0.1.18/misc/varfilter.py | 80 | 5783 | #!/software/bin/python
# Author: lh3, converted to python and modified to add -C option by Aylwyn Scally
#
# About:
# varfilter.py is a port of Heng's samtools.pl varFilter script into
# python, with an additional -C INT option. This option sets a minimum
# consensus score, above which the script will output a pileup line
# wherever it _could have_ called a variant, even if none is actually
# called (i.e. hom-ref positions). This is important if you want to
# subsequently merge the calls with those for another individual to get a
# synoptic view of calls at each site. Without this option, and in all
# other respects, it behaves like samtools.pl varFilter.
#
# Aylwyn Scally [email protected]
# Filtration code:
#
# C low CNS quality (hom-ref only)
# d low depth
# D high depth
# W too many SNPs in a window (SNP only)
# G close to a high-quality indel (SNP only)
# Q low RMS mapping quality (SNP only)
# g close to another indel with higher quality (indel only)
# s low SNP quality (SNP only)
# i low indel quality (indel only)
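#
# Hedged usage sketch (not part of the original script); the file names are
# hypothetical. The script reads consensus pileup from stdin or a file, e.g.:
#
#   samtools pileup -cf ref.fa aln.bam | varfilter.py -D 100 -C 20 > out.flt.pileup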
import sys
import getopt
def usage():
print '''usage: varfilter.py [options] [cns-pileup]
Options: -Q INT minimum RMS mapping quality for SNPs
-q INT minimum RMS mapping quality for gaps
-d INT minimum read depth
-D INT maximum read depth
-S INT minimum SNP quality
-i INT minimum indel quality
-C INT minimum consensus quality for hom-ref sites
-G INT min indel score for nearby SNP filtering
-w INT SNP within INT bp around a gap to be filtered
-W INT window size for filtering dense SNPs
-N INT max number of SNPs in a window
-l INT window size for filtering adjacent gaps
-p print filtered variants'''
def varFilter_aux(first, is_print):
try:
if first[1] == 0:
sys.stdout.write("\t".join(first[4:]) + "\n")
elif is_print:
sys.stderr.write("\t".join(["UQdDWGgsiCX"[first[1]]] + first[4:]) + "\n")
except IOError:
sys.exit()
mindepth = 3
maxdepth = 100
gapgapwin = 30
minsnpmapq = 25
mingapmapq = 10
minindelscore = 25
scorefactor = 100
snpgapwin = 10
densesnpwin = 10
densesnps = 2
printfilt = False
minsnpq = 0
minindelq = 0
mincnsq = 0
try:
options, args = getopt.gnu_getopt(sys.argv[1:], 'pq:d:D:l:Q:w:W:N:G:S:i:C:', [])
except getopt.GetoptError:
usage()
sys.exit(2)
for (oflag, oarg) in options:
if oflag == '-d': mindepth = int(oarg)
if oflag == '-D': maxdepth = int(oarg)
if oflag == '-l': gapgapwin = int(oarg)
if oflag == '-Q': minsnpmapq = int(oarg)
if oflag == '-q': mingapmapq = int(oarg)
if oflag == '-G': minindelscore = int(oarg)
if oflag == '-s': scorefactor = int(oarg)
if oflag == '-w': snpgapwin = int(oarg)
if oflag == '-W': densesnpwin = int(oarg)
if oflag == '-C': mincnsq = int(oarg)
if oflag == '-N': densesnps = int(oarg)
if oflag == '-p': printfilt = True
if oflag == '-S': minsnpq = int(oarg)
if oflag == '-i': minindelq = int(oarg)
if len(args) < 1:
inp = sys.stdin
else:
inp = open(args[0])
# calculate the window size
max_dist = max(gapgapwin, snpgapwin, densesnpwin)
staging = []
for t in (line.strip().split() for line in inp):
(flt, score) = (0, -1)
# non-var sites
if t[3] == '*/*':
continue
is_snp = t[2].upper() != t[3].upper()
if not (is_snp or mincnsq):
continue
# clear the out-of-range elements
while staging:
# Still on the same chromosome and the first element's window still affects this position?
if staging[0][4] == t[0] and int(staging[0][5]) + staging[0][2] + max_dist >= int(t[1]):
break
varFilter_aux(staging.pop(0), printfilt)
# first a simple filter
if int(t[7]) < mindepth:
flt = 2
elif int(t[7]) > maxdepth:
flt = 3
if t[2] == '*': # an indel
if minindelq and minindelq > int(t[5]):
flt = 8
elif is_snp:
if minsnpq and minsnpq> int(t[5]):
flt = 7
else:
if mincnsq and mincnsq > int(t[4]):
flt = 9
# site dependent filters
dlen = 0
if flt == 0:
if t[2] == '*': # an indel
# If deletion, remember the length of the deletion
(a,b) = t[3].split('/')
alen = len(a) - 1
blen = len(b) - 1
if alen>blen:
if a[0] == '-': dlen=alen
elif b[0] == '-': dlen=blen
if int(t[6]) < mingapmapq:
flt = 1
# filtering SNPs
if int(t[5]) >= minindelscore:
for x in (y for y in staging if y[3]):
# Is it a SNP and is it outside the SNP filter window?
if x[0] >= 0 or int(x[5]) + x[2] + snpgapwin < int(t[1]):
continue
if x[1] == 0:
x[1] = 5
# calculate the filtering score (different from indel quality)
score = int(t[5])
if t[8] != '*':
score += scorefactor * int(t[10])
if t[9] != '*':
score += scorefactor * int(t[11])
# check the staging list for indel filtering
for x in (y for y in staging if y[3]):
# Is it a SNP and is it outside the gap filter window
if x[0] < 0 or int(x[5]) + x[2] + gapgapwin < int(t[1]):
continue
if x[0] < score:
x[1] = 6
else:
flt = 6
break
else: # a SNP or hom-ref
if int(t[6]) < minsnpmapq:
flt = 1
# check adjacent SNPs
k = 1
for x in (y for y in staging if y[3]):
if x[0] < 0 and int(x[5]) + x[2] + densesnpwin >= int(t[1]) and (x[1] == 0 or x[1] == 4 or x[1] == 5):
k += 1
# filtering is necessary
if k > densesnps:
flt = 4
for x in (y for y in staging if y[3]):
if x[0] < 0 and int(x[5]) + x[2] + densesnpwin >= int(t[1]) and x[1] == 0:
x[1] = 4
else: # then check gap filter
for x in (y for y in staging if y[3]):
if x[0] < 0 or int(x[5]) + x[2] + snpgapwin < int(t[1]):
continue
if x[0] >= minindelscore:
flt = 5
break
staging.append([score, flt, dlen, is_snp] + t)
# output the last few elements in the staging list
while staging:
varFilter_aux(staging.pop(0), printfilt)
| gpl-2.0 |
kuiche/chromium | tools/grit/grit/tool/count.py | 7 | 1024 | #!/usr/bin/python2.4
# Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Count number of occurrences of a given message ID
'''
import getopt
import os
import types
from grit.tool import interface
from grit import grd_reader
from grit import util
from grit.extern import tclib
class CountMessage(interface.Tool):
'''Count the number of times a given message ID is used.
'''
def __init__(self):
pass
def ShortDescription(self):
    return 'Counts the number of times a given message ID is used.'
def Run(self, opts, args):
self.SetOptions(opts)
id = args[0]
res_tree = grd_reader.Parse(opts.input, debug=opts.extra_verbose)
res_tree.OnlyTheseTranslations([])
res_tree.RunGatherers(True)
count = 0
for c in res_tree.UberClique().AllCliques():
if c.GetId() == id:
count += 1
print "There are %d occurrences of message %s." % (count, id)
| bsd-3-clause |
gamechanger/kafka-python | kafka/protocol/admin.py | 1 | 1182 | from .struct import Struct
from .types import Array, Bytes, Int16, Schema, String
class ListGroupsResponse(Struct):
SCHEMA = Schema(
('error_code', Int16),
('groups', Array(
('group', String('utf-8')),
('protocol_type', String('utf-8'))))
)
class ListGroupsRequest(Struct):
API_KEY = 16
API_VERSION = 0
RESPONSE_TYPE = ListGroupsResponse
SCHEMA = Schema()
class DescribeGroupsResponse(Struct):
SCHEMA = Schema(
('groups', Array(
('error_code', Int16),
('group', String('utf-8')),
('state', String('utf-8')),
('protocol_type', String('utf-8')),
('protocol', String('utf-8')),
('members', Array(
('member_id', String('utf-8')),
('client_id', String('utf-8')),
('client_host', String('utf-8')),
('member_metadata', Bytes),
('member_assignment', Bytes)))))
)
class DescribeGroupsRequest(Struct):
API_KEY = 15
API_VERSION = 0
RESPONSE_TYPE = DescribeGroupsResponse
SCHEMA = Schema(
('groups', Array(String('utf-8')))
)
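# Hedged usage sketch (not part of the original module): Struct subclasses are
# instantiated with values matching their SCHEMA fields and serialized with
# encode(); the consumer group name below is hypothetical.
#
#   request = DescribeGroupsRequest(groups=['example-consumer-group'])
#   raw_bytes = request.encode()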
| apache-2.0 |
CooperLuan/airflow | airflow/operators/hive_to_mysql.py | 29 | 2324 | import logging
from airflow.hooks import HiveServer2Hook, MySqlHook
from airflow.models import BaseOperator
from airflow.utils import apply_defaults
class HiveToMySqlTransfer(BaseOperator):
"""
    Moves data from Hive to MySQL. Note that for now the data is loaded
    into memory before being pushed to MySQL, so this operator should
    be used for smallish amounts of data.
:param sql: SQL query to execute against the MySQL database
:type sql: str
:param mysql_table: target MySQL table, use dot notation to target a
specific database
:type mysql_table: str
    :param mysql_conn_id: destination mysql connection
    :type mysql_conn_id: str
    :param hiveserver2_conn_id: source hive connection
    :type hiveserver2_conn_id: str
    :param mysql_preoperator: sql statement to run against mysql prior to
        import, typically used to truncate or delete existing rows before
        the new data comes in, allowing the task to be idempotent (running
        the task twice won't double load data)
:type mysql_preoperator: str
"""
template_fields = ('sql', 'mysql_table', 'mysql_preoperator')
template_ext = ('.sql',)
ui_color = '#a0e08c'
@apply_defaults
def __init__(
self,
sql,
mysql_table,
hiveserver2_conn_id='hiveserver2_default',
mysql_conn_id='mysql_default',
mysql_preoperator=None,
*args, **kwargs):
super(HiveToMySqlTransfer, self).__init__(*args, **kwargs)
self.sql = sql
self.mysql_table = mysql_table
self.mysql_conn_id = mysql_conn_id
self.mysql_preoperator = mysql_preoperator
self.hiveserver2_conn_id = hiveserver2_conn_id
def execute(self, context):
hive = HiveServer2Hook(hiveserver2_conn_id=self.hiveserver2_conn_id)
logging.info("Extracting data from Hive")
logging.info(self.sql)
results = hive.get_records(self.sql)
mysql = MySqlHook(mysql_conn_id=self.mysql_conn_id)
if self.mysql_preoperator:
logging.info("Running MySQL preoperator")
logging.info(self.mysql_preoperator)
mysql.run(self.mysql_preoperator)
logging.info("Inserting rows into MySQL")
mysql.insert_rows(table=self.mysql_table, rows=results)
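# Hedged usage sketch (not part of the original module); the task id, SQL,
# table, connection ids and DAG below are hypothetical:
#
#   transfer = HiveToMySqlTransfer(
#       task_id='hive_totals_to_mysql',
#       sql='SELECT name, total FROM hive_db.daily_totals',
#       mysql_table='reporting.daily_totals',
#       mysql_preoperator='TRUNCATE TABLE reporting.daily_totals',
#       dag=dag)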
| apache-2.0 |
elainenaomi/sciwonc-dataflow-examples | sbbd2016/experiments/1-postgres/3_workflow_full_10files_primary_nosh_nors_annot_with_proj_3s/pegasus.bDkvI/pegasus-4.6.0/lib/pegasus/externals/python/boto/__init__.py | 87 | 41904 | # Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010-2011, Eucalyptus Systems, Inc.
# Copyright (c) 2011, Nexenta Systems Inc.
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# Copyright (c) 2010, Google, Inc.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.pyami.config import Config, BotoConfigLocations
from boto.storage_uri import BucketStorageUri, FileStorageUri
import boto.plugin
import datetime
import os
import platform
import re
import sys
import logging
import logging.config
from boto.compat import urlparse
from boto.exception import InvalidUriError
__version__ = '2.38.0'
Version = __version__ # for backware compatibility
# http://bugs.python.org/issue7980
datetime.datetime.strptime('', '')
UserAgent = 'Boto/%s Python/%s %s/%s' % (
__version__,
platform.python_version(),
platform.system(),
platform.release()
)
config = Config()
# Regex to disallow buckets violating charset or not [3..255] chars total.
BUCKET_NAME_RE = re.compile(r'^[a-zA-Z0-9][a-zA-Z0-9\._-]{1,253}[a-zA-Z0-9]$')
# Regex to disallow buckets with individual DNS labels longer than 63.
TOO_LONG_DNS_NAME_COMP = re.compile(r'[-_a-z0-9]{64}')
GENERATION_RE = re.compile(r'(?P<versionless_uri_str>.+)'
r'#(?P<generation>[0-9]+)$')
VERSION_RE = re.compile('(?P<versionless_uri_str>.+)#(?P<version_id>.+)$')
ENDPOINTS_PATH = os.path.join(os.path.dirname(__file__), 'endpoints.json')
def init_logging():
for file in BotoConfigLocations:
try:
logging.config.fileConfig(os.path.expanduser(file))
except:
pass
class NullHandler(logging.Handler):
def emit(self, record):
pass
log = logging.getLogger('boto')
perflog = logging.getLogger('boto.perf')
log.addHandler(NullHandler())
perflog.addHandler(NullHandler())
init_logging()
# convenience function to set logging to a particular file
def set_file_logger(name, filepath, level=logging.INFO, format_string=None):
global log
if not format_string:
format_string = "%(asctime)s %(name)s [%(levelname)s]:%(message)s"
logger = logging.getLogger(name)
logger.setLevel(level)
fh = logging.FileHandler(filepath)
fh.setLevel(level)
formatter = logging.Formatter(format_string)
fh.setFormatter(formatter)
logger.addHandler(fh)
log = logger
def set_stream_logger(name, level=logging.DEBUG, format_string=None):
global log
if not format_string:
format_string = "%(asctime)s %(name)s [%(levelname)s]:%(message)s"
logger = logging.getLogger(name)
logger.setLevel(level)
fh = logging.StreamHandler()
fh.setLevel(level)
formatter = logging.Formatter(format_string)
fh.setFormatter(formatter)
logger.addHandler(fh)
log = logger
def connect_sqs(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.sqs.connection.SQSConnection`
:return: A connection to Amazon's SQS
"""
from boto.sqs.connection import SQSConnection
return SQSConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_s3(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.s3.connection.S3Connection`
:return: A connection to Amazon's S3
"""
from boto.s3.connection import S3Connection
return S3Connection(aws_access_key_id, aws_secret_access_key, **kwargs)
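# Hedged usage sketch (not part of the original module): the connect_* helpers
# in this module all follow this pattern; the credentials and bucket name below
# are placeholders.
#
#   s3 = boto.connect_s3('<access-key-id>', '<secret-access-key>')
#   bucket = s3.get_bucket('example-bucket')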
def connect_gs(gs_access_key_id=None, gs_secret_access_key=None, **kwargs):
"""
@type gs_access_key_id: string
@param gs_access_key_id: Your Google Cloud Storage Access Key ID
@type gs_secret_access_key: string
@param gs_secret_access_key: Your Google Cloud Storage Secret Access Key
@rtype: L{GSConnection<boto.gs.connection.GSConnection>}
@return: A connection to Google's Storage service
"""
from boto.gs.connection import GSConnection
return GSConnection(gs_access_key_id, gs_secret_access_key, **kwargs)
def connect_ec2(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.ec2.connection.EC2Connection`
:return: A connection to Amazon's EC2
"""
from boto.ec2.connection import EC2Connection
return EC2Connection(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_elb(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.ec2.elb.ELBConnection`
:return: A connection to Amazon's Load Balancing Service
"""
from boto.ec2.elb import ELBConnection
return ELBConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_autoscale(aws_access_key_id=None, aws_secret_access_key=None,
**kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.ec2.autoscale.AutoScaleConnection`
:return: A connection to Amazon's Auto Scaling Service
    :type use_block_device_types: bool
    :param use_block_device_types: Specifies whether to return described Launch Configs with block device mappings containing
        block device types, or a list of old style block device mappings (deprecated). This defaults to false for compatibility
        with the old incorrect style.
"""
from boto.ec2.autoscale import AutoScaleConnection
return AutoScaleConnection(aws_access_key_id, aws_secret_access_key,
**kwargs)
def connect_cloudwatch(aws_access_key_id=None, aws_secret_access_key=None,
**kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.ec2.cloudwatch.CloudWatchConnection`
:return: A connection to Amazon's EC2 Monitoring service
"""
from boto.ec2.cloudwatch import CloudWatchConnection
return CloudWatchConnection(aws_access_key_id, aws_secret_access_key,
**kwargs)
def connect_sdb(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.sdb.connection.SDBConnection`
:return: A connection to Amazon's SDB
"""
from boto.sdb.connection import SDBConnection
return SDBConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_fps(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.fps.connection.FPSConnection`
:return: A connection to FPS
"""
from boto.fps.connection import FPSConnection
return FPSConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_mturk(aws_access_key_id=None, aws_secret_access_key=None,
**kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.mturk.connection.MTurkConnection`
:return: A connection to MTurk
"""
from boto.mturk.connection import MTurkConnection
return MTurkConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_cloudfront(aws_access_key_id=None, aws_secret_access_key=None,
**kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
    :rtype: :class:`boto.cloudfront.CloudFrontConnection`
    :return: A connection to Amazon's CloudFront service
"""
from boto.cloudfront import CloudFrontConnection
return CloudFrontConnection(aws_access_key_id, aws_secret_access_key,
**kwargs)
def connect_vpc(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.vpc.VPCConnection`
:return: A connection to VPC
"""
from boto.vpc import VPCConnection
return VPCConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_rds(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.rds.RDSConnection`
:return: A connection to RDS
"""
from boto.rds import RDSConnection
return RDSConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_rds2(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.rds2.layer1.RDSConnection`
:return: A connection to RDS
"""
from boto.rds2.layer1 import RDSConnection
return RDSConnection(
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
**kwargs
)
def connect_emr(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.emr.EmrConnection`
:return: A connection to Elastic mapreduce
"""
from boto.emr import EmrConnection
return EmrConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_sns(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.sns.SNSConnection`
:return: A connection to Amazon's SNS
"""
from boto.sns import SNSConnection
return SNSConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_iam(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.iam.IAMConnection`
:return: A connection to Amazon's IAM
"""
from boto.iam import IAMConnection
return IAMConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_route53(aws_access_key_id=None, aws_secret_access_key=None,
**kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
    :rtype: :class:`boto.route53.Route53Connection`
:return: A connection to Amazon's Route53 DNS Service
"""
from boto.route53 import Route53Connection
return Route53Connection(aws_access_key_id, aws_secret_access_key,
**kwargs)
def connect_cloudformation(aws_access_key_id=None, aws_secret_access_key=None,
**kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.cloudformation.CloudFormationConnection`
:return: A connection to Amazon's CloudFormation Service
"""
from boto.cloudformation import CloudFormationConnection
return CloudFormationConnection(aws_access_key_id, aws_secret_access_key,
**kwargs)
def connect_euca(host=None, aws_access_key_id=None, aws_secret_access_key=None,
port=8773, path='/services/Eucalyptus', is_secure=False,
**kwargs):
"""
Connect to a Eucalyptus service.
:type host: string
:param host: the host name or ip address of the Eucalyptus server
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.ec2.connection.EC2Connection`
:return: A connection to Eucalyptus server
"""
from boto.ec2 import EC2Connection
from boto.ec2.regioninfo import RegionInfo
# Check for values in boto config, if not supplied as args
if not aws_access_key_id:
aws_access_key_id = config.get('Credentials',
'euca_access_key_id',
None)
if not aws_secret_access_key:
aws_secret_access_key = config.get('Credentials',
'euca_secret_access_key',
None)
if not host:
host = config.get('Boto', 'eucalyptus_host', None)
reg = RegionInfo(name='eucalyptus', endpoint=host)
return EC2Connection(aws_access_key_id, aws_secret_access_key,
region=reg, port=port, path=path,
is_secure=is_secure, **kwargs)
def connect_glacier(aws_access_key_id=None, aws_secret_access_key=None,
**kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.glacier.layer2.Layer2`
:return: A connection to Amazon's Glacier Service
"""
from boto.glacier.layer2 import Layer2
return Layer2(aws_access_key_id, aws_secret_access_key,
**kwargs)
def connect_ec2_endpoint(url, aws_access_key_id=None,
aws_secret_access_key=None,
**kwargs):
"""
Connect to an EC2 Api endpoint. Additional arguments are passed
through to connect_ec2.
:type url: string
:param url: A url for the ec2 api endpoint to connect to
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.ec2.connection.EC2Connection`
:return: A connection to Eucalyptus server
"""
from boto.ec2.regioninfo import RegionInfo
purl = urlparse(url)
kwargs['port'] = purl.port
kwargs['host'] = purl.hostname
kwargs['path'] = purl.path
if not 'is_secure' in kwargs:
kwargs['is_secure'] = (purl.scheme == "https")
kwargs['region'] = RegionInfo(name=purl.hostname,
endpoint=purl.hostname)
kwargs['aws_access_key_id'] = aws_access_key_id
kwargs['aws_secret_access_key'] = aws_secret_access_key
return(connect_ec2(**kwargs))
def connect_walrus(host=None, aws_access_key_id=None,
aws_secret_access_key=None,
port=8773, path='/services/Walrus', is_secure=False,
**kwargs):
"""
Connect to a Walrus service.
:type host: string
:param host: the host name or ip address of the Walrus server
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.s3.connection.S3Connection`
:return: A connection to Walrus
"""
from boto.s3.connection import S3Connection
from boto.s3.connection import OrdinaryCallingFormat
# Check for values in boto config, if not supplied as args
if not aws_access_key_id:
aws_access_key_id = config.get('Credentials',
'euca_access_key_id',
None)
if not aws_secret_access_key:
aws_secret_access_key = config.get('Credentials',
'euca_secret_access_key',
None)
if not host:
host = config.get('Boto', 'walrus_host', None)
return S3Connection(aws_access_key_id, aws_secret_access_key,
host=host, port=port, path=path,
calling_format=OrdinaryCallingFormat(),
is_secure=is_secure, **kwargs)
def connect_ses(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.ses.SESConnection`
:return: A connection to Amazon's SES
"""
from boto.ses import SESConnection
return SESConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_sts(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.sts.STSConnection`
:return: A connection to Amazon's STS
"""
from boto.sts import STSConnection
return STSConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_ia(ia_access_key_id=None, ia_secret_access_key=None,
is_secure=False, **kwargs):
"""
Connect to the Internet Archive via their S3-like API.
:type ia_access_key_id: string
:param ia_access_key_id: Your IA Access Key ID. This will also look
in your boto config file for an entry in the Credentials
section called "ia_access_key_id"
:type ia_secret_access_key: string
:param ia_secret_access_key: Your IA Secret Access Key. This will also
look in your boto config file for an entry in the Credentials
section called "ia_secret_access_key"
:rtype: :class:`boto.s3.connection.S3Connection`
:return: A connection to the Internet Archive
"""
from boto.s3.connection import S3Connection
from boto.s3.connection import OrdinaryCallingFormat
access_key = config.get('Credentials', 'ia_access_key_id',
ia_access_key_id)
secret_key = config.get('Credentials', 'ia_secret_access_key',
ia_secret_access_key)
return S3Connection(access_key, secret_key,
host='s3.us.archive.org',
calling_format=OrdinaryCallingFormat(),
is_secure=is_secure, **kwargs)
def connect_dynamodb(aws_access_key_id=None,
aws_secret_access_key=None,
**kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.dynamodb.layer2.Layer2`
:return: A connection to the Layer2 interface for DynamoDB.
"""
from boto.dynamodb.layer2 import Layer2
return Layer2(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_swf(aws_access_key_id=None,
aws_secret_access_key=None,
**kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.swf.layer1.Layer1`
:return: A connection to the Layer1 interface for SWF.
"""
from boto.swf.layer1 import Layer1
return Layer1(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_cloudsearch(aws_access_key_id=None,
aws_secret_access_key=None,
**kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.cloudsearch.layer2.Layer2`
:return: A connection to Amazon's CloudSearch service
"""
from boto.cloudsearch.layer2 import Layer2
return Layer2(aws_access_key_id, aws_secret_access_key,
**kwargs)
def connect_cloudsearch2(aws_access_key_id=None,
aws_secret_access_key=None,
sign_request=False,
**kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:type sign_request: bool
:param sign_request: whether or not to sign search and
upload requests
:rtype: :class:`boto.cloudsearch2.layer2.Layer2`
:return: A connection to Amazon's CloudSearch2 service
"""
from boto.cloudsearch2.layer2 import Layer2
return Layer2(aws_access_key_id, aws_secret_access_key,
sign_request=sign_request,
**kwargs)
def connect_cloudsearchdomain(aws_access_key_id=None,
aws_secret_access_key=None,
**kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.cloudsearchdomain.layer1.CloudSearchDomainConnection`
:return: A connection to Amazon's CloudSearch Domain service
"""
from boto.cloudsearchdomain.layer1 import CloudSearchDomainConnection
return CloudSearchDomainConnection(aws_access_key_id,
aws_secret_access_key, **kwargs)
def connect_beanstalk(aws_access_key_id=None,
aws_secret_access_key=None,
**kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.beanstalk.layer1.Layer1`
:return: A connection to Amazon's Elastic Beanstalk service
"""
from boto.beanstalk.layer1 import Layer1
return Layer1(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_elastictranscoder(aws_access_key_id=None,
aws_secret_access_key=None,
**kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.ets.layer1.ElasticTranscoderConnection`
:return: A connection to Amazon's Elastic Transcoder service
"""
from boto.elastictranscoder.layer1 import ElasticTranscoderConnection
return ElasticTranscoderConnection(
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
**kwargs)
def connect_opsworks(aws_access_key_id=None,
aws_secret_access_key=None,
**kwargs):
from boto.opsworks.layer1 import OpsWorksConnection
return OpsWorksConnection(
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
**kwargs)
def connect_redshift(aws_access_key_id=None,
aws_secret_access_key=None,
**kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.redshift.layer1.RedshiftConnection`
:return: A connection to Amazon's Redshift service
"""
from boto.redshift.layer1 import RedshiftConnection
return RedshiftConnection(
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
**kwargs
)
def connect_support(aws_access_key_id=None,
aws_secret_access_key=None,
**kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.support.layer1.SupportConnection`
:return: A connection to Amazon's Support service
"""
from boto.support.layer1 import SupportConnection
return SupportConnection(
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
**kwargs
)
def connect_cloudtrail(aws_access_key_id=None,
aws_secret_access_key=None,
**kwargs):
"""
Connect to AWS CloudTrail
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.cloudtrail.layer1.CloudtrailConnection`
:return: A connection to the AWS Cloudtrail service
"""
from boto.cloudtrail.layer1 import CloudTrailConnection
return CloudTrailConnection(
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
**kwargs
)
def connect_directconnect(aws_access_key_id=None,
aws_secret_access_key=None,
**kwargs):
"""
Connect to AWS DirectConnect
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.directconnect.layer1.DirectConnectConnection`
:return: A connection to the AWS DirectConnect service
"""
from boto.directconnect.layer1 import DirectConnectConnection
return DirectConnectConnection(
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
**kwargs
)
def connect_kinesis(aws_access_key_id=None,
aws_secret_access_key=None,
**kwargs):
"""
Connect to Amazon Kinesis
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
    :rtype: :class:`boto.kinesis.layer1.KinesisConnection`
:return: A connection to the Amazon Kinesis service
"""
from boto.kinesis.layer1 import KinesisConnection
return KinesisConnection(
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
**kwargs
)
def connect_logs(aws_access_key_id=None,
aws_secret_access_key=None,
**kwargs):
"""
Connect to Amazon CloudWatch Logs
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
    :rtype: :class:`boto.logs.layer1.CloudWatchLogsConnection`
:return: A connection to the Amazon CloudWatch Logs service
"""
from boto.logs.layer1 import CloudWatchLogsConnection
return CloudWatchLogsConnection(
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
**kwargs
)
def connect_route53domains(aws_access_key_id=None,
aws_secret_access_key=None,
**kwargs):
"""
Connect to Amazon Route 53 Domains
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
    :rtype: :class:`boto.route53.domains.layer1.Route53DomainsConnection`
:return: A connection to the Amazon Route 53 Domains service
"""
from boto.route53.domains.layer1 import Route53DomainsConnection
return Route53DomainsConnection(
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
**kwargs
)
def connect_cognito_identity(aws_access_key_id=None,
aws_secret_access_key=None,
**kwargs):
"""
Connect to Amazon Cognito Identity
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
    :rtype: :class:`boto.cognito.identity.layer1.CognitoIdentityConnection`
:return: A connection to the Amazon Cognito Identity service
"""
from boto.cognito.identity.layer1 import CognitoIdentityConnection
return CognitoIdentityConnection(
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
**kwargs
)
def connect_cognito_sync(aws_access_key_id=None,
aws_secret_access_key=None,
**kwargs):
"""
Connect to Amazon Cognito Sync
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
    :rtype: :class:`boto.cognito.sync.layer1.CognitoSyncConnection`
:return: A connection to the Amazon Cognito Sync service
"""
from boto.cognito.sync.layer1 import CognitoSyncConnection
return CognitoSyncConnection(
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
**kwargs
)
def connect_kms(aws_access_key_id=None,
aws_secret_access_key=None,
**kwargs):
"""
Connect to AWS Key Management Service
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
    :rtype: :class:`boto.kms.layer1.KMSConnection`
:return: A connection to the AWS Key Management Service
"""
from boto.kms.layer1 import KMSConnection
return KMSConnection(
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
**kwargs
)
def connect_awslambda(aws_access_key_id=None,
aws_secret_access_key=None,
**kwargs):
"""
Connect to AWS Lambda
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
    :rtype: :class:`boto.awslambda.layer1.AWSLambdaConnection`
:return: A connection to the AWS Lambda service
"""
from boto.awslambda.layer1 import AWSLambdaConnection
return AWSLambdaConnection(
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
**kwargs
)
def connect_codedeploy(aws_access_key_id=None,
aws_secret_access_key=None,
**kwargs):
"""
Connect to AWS CodeDeploy
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
    :rtype: :class:`boto.codedeploy.layer1.CodeDeployConnection`
:return: A connection to the AWS CodeDeploy service
"""
from boto.codedeploy.layer1 import CodeDeployConnection
return CodeDeployConnection(
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
**kwargs
)
def connect_configservice(aws_access_key_id=None,
aws_secret_access_key=None,
**kwargs):
"""
Connect to AWS Config
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
    :rtype: :class:`boto.configservice.layer1.ConfigServiceConnection`
:return: A connection to the AWS Config service
"""
from boto.configservice.layer1 import ConfigServiceConnection
return ConfigServiceConnection(
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
**kwargs
)
def connect_cloudhsm(aws_access_key_id=None,
aws_secret_access_key=None,
**kwargs):
"""
Connect to AWS CloudHSM
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
    :rtype: :class:`boto.cloudhsm.layer1.CloudHSMConnection`
:return: A connection to the AWS CloudHSM service
"""
from boto.cloudhsm.layer1 import CloudHSMConnection
return CloudHSMConnection(
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
**kwargs
)
def connect_ec2containerservice(aws_access_key_id=None,
aws_secret_access_key=None,
**kwargs):
"""
Connect to Amazon EC2 Container Service
    :rtype: :class:`boto.ec2containerservice.layer1.EC2ContainerServiceConnection`
:return: A connection to the Amazon EC2 Container Service
"""
from boto.ec2containerservice.layer1 import EC2ContainerServiceConnection
return EC2ContainerServiceConnection(
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
**kwargs
)
def connect_machinelearning(aws_access_key_id=None,
aws_secret_access_key=None,
**kwargs):
"""
Connect to Amazon Machine Learning service
    :rtype: :class:`boto.machinelearning.layer1.MachineLearningConnection`
:return: A connection to the Amazon Machine Learning service
"""
from boto.machinelearning.layer1 import MachineLearningConnection
return MachineLearningConnection(
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
**kwargs
)
def storage_uri(uri_str, default_scheme='file', debug=0, validate=True,
bucket_storage_uri_class=BucketStorageUri,
suppress_consec_slashes=True, is_latest=False):
"""
Instantiate a StorageUri from a URI string.
:type uri_str: string
:param uri_str: URI naming bucket + optional object.
:type default_scheme: string
:param default_scheme: default scheme for scheme-less URIs.
:type debug: int
:param debug: debug level to pass in to boto connection (range 0..2).
:type validate: bool
:param validate: whether to check for bucket name validity.
:type bucket_storage_uri_class: BucketStorageUri interface.
:param bucket_storage_uri_class: Allows mocking for unit tests.
:param suppress_consec_slashes: If provided, controls whether
consecutive slashes will be suppressed in key paths.
:type is_latest: bool
:param is_latest: whether this versioned object represents the
current version.
We allow validate to be disabled to allow caller
to implement bucket-level wildcarding (outside the boto library;
see gsutil).
:rtype: :class:`boto.StorageUri` subclass
:return: StorageUri subclass for given URI.
``uri_str`` must be one of the following formats:
* gs://bucket/name
* gs://bucket/name#ver
* s3://bucket/name
* gs://bucket
* s3://bucket
* filename (which could be a Unix path like /a/b/c or a Windows path like
C:\a\b\c)
The last example uses the default scheme ('file', unless overridden).
"""
version_id = None
generation = None
# Manually parse URI components instead of using urlparse because
# what we're calling URIs don't really fit the standard syntax for URIs
# (the latter includes an optional host/net location part).
end_scheme_idx = uri_str.find('://')
if end_scheme_idx == -1:
scheme = default_scheme.lower()
path = uri_str
else:
scheme = uri_str[0:end_scheme_idx].lower()
path = uri_str[end_scheme_idx + 3:]
if scheme not in ['file', 's3', 'gs']:
raise InvalidUriError('Unrecognized scheme "%s"' % scheme)
if scheme == 'file':
# For file URIs we have no bucket name, and use the complete path
# (minus 'file://') as the object name.
is_stream = False
if path == '-':
is_stream = True
return FileStorageUri(path, debug, is_stream)
else:
path_parts = path.split('/', 1)
bucket_name = path_parts[0]
object_name = ''
# If validate enabled, ensure the bucket name is valid, to avoid
# possibly confusing other parts of the code. (For example if we didn't
# catch bucket names containing ':', when a user tried to connect to
# the server with that name they might get a confusing error about
# non-integer port numbers.)
if (validate and bucket_name and
(not BUCKET_NAME_RE.match(bucket_name)
or TOO_LONG_DNS_NAME_COMP.search(bucket_name))):
raise InvalidUriError('Invalid bucket name in URI "%s"' % uri_str)
if scheme == 'gs':
match = GENERATION_RE.search(path)
if match:
md = match.groupdict()
versionless_uri_str = md['versionless_uri_str']
path_parts = versionless_uri_str.split('/', 1)
generation = int(md['generation'])
elif scheme == 's3':
match = VERSION_RE.search(path)
if match:
md = match.groupdict()
versionless_uri_str = md['versionless_uri_str']
path_parts = versionless_uri_str.split('/', 1)
version_id = md['version_id']
else:
raise InvalidUriError('Unrecognized scheme "%s"' % scheme)
if len(path_parts) > 1:
object_name = path_parts[1]
return bucket_storage_uri_class(
scheme, bucket_name, object_name, debug,
suppress_consec_slashes=suppress_consec_slashes,
version_id=version_id, generation=generation, is_latest=is_latest)
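# Hedged usage sketch (not part of the original module); the bucket, object and
# local file names below are hypothetical:
#
#   uri = storage_uri('gs://example-bucket/path/to/object.txt')
#   local_uri = storage_uri('/tmp/example.txt', default_scheme='file')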
def storage_uri_for_key(key):
"""Returns a StorageUri for the given key.
:type key: :class:`boto.s3.key.Key` or subclass
:param key: URI naming bucket + optional object.
"""
if not isinstance(key, boto.s3.key.Key):
raise InvalidUriError('Requested key (%s) is not a subclass of '
'boto.s3.key.Key' % str(type(key)))
prov_name = key.bucket.connection.provider.get_provider_name()
uri_str = '%s://%s/%s' % (prov_name, key.bucket.name, key.name)
return storage_uri(uri_str)
boto.plugin.load_plugins(config)
| gpl-3.0 |
sam-m888/gramps | gramps/gen/filters/rules/media/_hasnotematchingsubstringof.py | 5 | 1776 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2002-2006 Donald N. Allingham
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ....const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from .._hasnotesubstrbase import HasNoteSubstrBase
#-------------------------------------------------------------------------
# "Media having notes that contain a substring"
#-------------------------------------------------------------------------
class HasNoteMatchingSubstringOf(HasNoteSubstrBase):
"""Media having notes containing <substring>"""
name = _('Media objects having notes containing <substring>')
description = _("Matches media objects whose notes contain text "
"matching a substring")
| gpl-2.0 |
pkilambi/python-gnocchiclient | gnocchiclient/openstack/common/version.py | 15 | 5885 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utilities for consuming the auto-generated versioninfo files.
"""
import datetime
import pkg_resources
import setup
class _deferred_version_string(object):
"""Internal helper class which provides delayed version calculation."""
def __init__(self, version_info, prefix):
self.version_info = version_info
self.prefix = prefix
def __str__(self):
return "%s%s" % (self.prefix, self.version_info.version_string())
def __repr__(self):
return "%s%s" % (self.prefix, self.version_info.version_string())
class VersionInfo(object):
def __init__(self, package, python_package=None, pre_version=None):
"""Object that understands versioning for a package
:param package: name of the top level python namespace. For glance,
this would be "glance" for python-glanceclient, it
would be "glanceclient"
:param python_package: optional name of the project name. For
glance this can be left unset. For
python-glanceclient, this would be
"python-glanceclient"
:param pre_version: optional version that the project is working to
"""
self.package = package
if python_package is None:
self.python_package = package
else:
self.python_package = python_package
self.pre_version = pre_version
self.version = None
def _generate_version(self):
"""Defer to the openstack.common.setup routines for making a
version from git."""
if self.pre_version is None:
return setup.get_post_version(self.python_package)
else:
return setup.get_pre_version(self.python_package, self.pre_version)
def _newer_version(self, pending_version):
"""Check to see if we're working with a stale version or not.
We expect a version string that either looks like:
2012.2~f3~20120708.10.4426392
which is an unreleased version of a pre-version, or:
0.1.1.4.gcc9e28a
which is an unreleased version of a post-version, or:
0.1.1
        which is a release and should match a tag.
For now, if we have a date-embedded version, check to see if it's
old, and if so re-generate. Otherwise, just deal with it.
"""
try:
version_date = int(self.version.split("~")[-1].split('.')[0])
if version_date < int(datetime.date.today().strftime('%Y%m%d')):
return self._generate_version()
else:
return pending_version
except Exception:
return pending_version
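    # Editor's note (added comment): for a date-embedded pre-version such as
    # "2012.2~f3~20120708.10.4426392" the code above compares 20120708 with
    # today's date and regenerates the version when it is stale; values that
    # do not parse to an integer date fall through to the except clause and
    # the pending version is returned unchanged.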
def version_string_with_vcs(self, always=False):
"""Return the full version of the package including suffixes indicating
VCS status.
For instance, if we are working towards the 2012.2 release,
        this method should return 2012.2 if this is a final
release, or else something like 2012.2~f1~20120705.20 if it's not.
:param always: if true, skip all version caching
"""
if always:
self.version = self._generate_version()
if self.version is None:
requirement = pkg_resources.Requirement.parse(self.python_package)
versioninfo = "%s/versioninfo" % self.package
try:
raw_version = pkg_resources.resource_string(requirement,
versioninfo)
self.version = self._newer_version(raw_version.strip())
except (IOError, pkg_resources.DistributionNotFound):
self.version = self._generate_version()
return self.version
def canonical_version_string(self, always=False):
"""Return the simple version of the package excluding any suffixes.
For instance, if we are working towards the 2012.2 release,
canonical_version_string should return 2012.2 in all cases.
:param always: if true, skip all version caching
"""
return self.version_string_with_vcs(always).split('~')[0]
def version_string(self, always=False):
"""Return the base version of the package.
For instance, if we are working towards the 2012.2 release,
version_string should return 2012.2 if this is a final release, or
2012.2-dev if it is not.
:param always: if true, skip all version caching
"""
version_parts = self.version_string_with_vcs(always).split('~')
if len(version_parts) == 1:
return version_parts[0]
else:
return '%s-dev' % (version_parts[0],)
def deferred_version_string(self, prefix=""):
"""Generate an object which will expand in a string context to
        the results of version_string(). We do this so that we don't
call into pkg_resources every time we start up a program when
passing version information into the CONF constructor, but
rather only do the calculation when and if a version is requested
"""
return _deferred_version_string(self, prefix)
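# Hedged usage sketch (editor's addition, not part of the upstream module).
# The package name below is an assumption used only for illustration:
#
#     version_info = VersionInfo('gnocchiclient')
#     print(version_info.canonical_version_string())
#     print(version_info.deferred_version_string(prefix='v'))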
| apache-2.0 |
UTSA-ICS/keystone-SID | keystone/tests/test_auth.py | 1 | 44678 | # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import datetime
import uuid
import mock
from keystone import assignment
from keystone import auth
from keystone.common import authorization
from keystone.common import environment
from keystone import config
from keystone import exception
from keystone.openstack.common import timeutils
from keystone import tests
from keystone.tests import default_fixtures
from keystone import token
from keystone import trust
CONF = config.CONF
TIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'
DEFAULT_DOMAIN_ID = CONF.identity.default_domain_id
HOST_URL = 'http://keystone:5001'
def _build_user_auth(token=None, user_id=None, username=None,
password=None, tenant_id=None, tenant_name=None,
trust_id=None):
"""Build auth dictionary.
It will create an auth dictionary based on all the arguments
that it receives.
"""
auth_json = {}
if token is not None:
auth_json['token'] = token
if username or password:
auth_json['passwordCredentials'] = {}
if username is not None:
auth_json['passwordCredentials']['username'] = username
if user_id is not None:
auth_json['passwordCredentials']['userId'] = user_id
if password is not None:
auth_json['passwordCredentials']['password'] = password
if tenant_name is not None:
auth_json['tenantName'] = tenant_name
if tenant_id is not None:
auth_json['tenantId'] = tenant_id
if trust_id is not None:
auth_json['trust_id'] = trust_id
return auth_json
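# Editor's illustrative note (not part of the original test module): a call
# such as _build_user_auth(username='FOO', password='foo2', tenant_name='BAR')
# produces a v2-style body of the form
# {'passwordCredentials': {'username': 'FOO', 'password': 'foo2'},
#  'tenantName': 'BAR'}.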
class AuthTest(tests.TestCase):
def setUp(self):
super(AuthTest, self).setUp()
self.load_backends()
self.load_fixtures(default_fixtures)
# need to register the token provider first because auth controller
# depends on it
token.provider.Manager()
self.context_with_remote_user = {'environment':
{'REMOTE_USER': 'FOO',
'AUTH_TYPE': 'Negotiate'}}
self.empty_context = {'environment': {}}
self.controller = token.controllers.Auth()
        # This call sets up, among other things, the call to popen
        # that will be used to run the CMS command. These tests were
        # passing only due to the global nature of the call. If the
        # tests in this file are run alone, API calls return unauthorized.
environment.use_eventlet(monkeypatch_thread=False)
def assertEqualTokens(self, a, b):
"""Assert that two tokens are equal.
Compare two tokens except for their ids. This also truncates
the time in the comparison.
"""
def normalize(token):
token['access']['token']['id'] = 'dummy'
del token['access']['token']['expires']
del token['access']['token']['issued_at']
return token
self.assertCloseEnoughForGovernmentWork(
timeutils.parse_isotime(a['access']['token']['expires']),
timeutils.parse_isotime(b['access']['token']['expires']))
self.assertCloseEnoughForGovernmentWork(
timeutils.parse_isotime(a['access']['token']['issued_at']),
timeutils.parse_isotime(b['access']['token']['issued_at']))
return self.assertDictEqual(normalize(a), normalize(b))
class AuthBadRequests(AuthTest):
def setUp(self):
super(AuthBadRequests, self).setUp()
def test_no_external_auth(self):
"""Verify that _authenticate_external() raises exception if N/A."""
self.assertRaises(
token.controllers.ExternalAuthNotApplicable,
self.controller._authenticate_external,
{}, {})
def test_no_token_in_auth(self):
"""Verify that _authenticate_token() raises exception if no token."""
self.assertRaises(
exception.ValidationError,
self.controller._authenticate_token,
None, {})
def test_no_credentials_in_auth(self):
"""Verify that _authenticate_local() raises exception if no creds."""
self.assertRaises(
exception.ValidationError,
self.controller._authenticate_local,
None, {})
def test_authenticate_blank_request_body(self):
"""Verify sending empty json dict raises the right exception."""
self.assertRaises(exception.ValidationError,
self.controller.authenticate,
{}, {})
def test_authenticate_blank_auth(self):
"""Verify sending blank 'auth' raises the right exception."""
body_dict = _build_user_auth()
self.assertRaises(exception.ValidationError,
self.controller.authenticate,
{}, body_dict)
def test_authenticate_invalid_auth_content(self):
"""Verify sending invalid 'auth' raises the right exception."""
self.assertRaises(exception.ValidationError,
self.controller.authenticate,
{}, {'auth': 'abcd'})
def test_authenticate_user_id_too_large(self):
"""Verify sending large 'userId' raises the right exception."""
body_dict = _build_user_auth(user_id='0' * 65, username='FOO',
password='foo2')
self.assertRaises(exception.ValidationSizeError,
self.controller.authenticate,
{}, body_dict)
def test_authenticate_username_too_large(self):
"""Verify sending large 'username' raises the right exception."""
body_dict = _build_user_auth(username='0' * 65, password='foo2')
self.assertRaises(exception.ValidationSizeError,
self.controller.authenticate,
{}, body_dict)
def test_authenticate_tenant_id_too_large(self):
"""Verify sending large 'tenantId' raises the right exception."""
body_dict = _build_user_auth(username='FOO', password='foo2',
tenant_id='0' * 65)
self.assertRaises(exception.ValidationSizeError,
self.controller.authenticate,
{}, body_dict)
def test_authenticate_tenant_name_too_large(self):
"""Verify sending large 'tenantName' raises the right exception."""
body_dict = _build_user_auth(username='FOO', password='foo2',
tenant_name='0' * 65)
self.assertRaises(exception.ValidationSizeError,
self.controller.authenticate,
{}, body_dict)
def test_authenticate_token_too_large(self):
"""Verify sending large 'token' raises the right exception."""
body_dict = _build_user_auth(token={'id': '0' * 8193})
self.assertRaises(exception.ValidationSizeError,
self.controller.authenticate,
{}, body_dict)
def test_authenticate_password_too_large(self):
"""Verify sending large 'password' raises the right exception."""
length = CONF.identity.max_password_length + 1
body_dict = _build_user_auth(username='FOO', password='0' * length)
self.assertRaises(exception.ValidationSizeError,
self.controller.authenticate,
{}, body_dict)
class AuthWithToken(AuthTest):
def setUp(self):
super(AuthWithToken, self).setUp()
def test_unscoped_token(self):
"""Verify getting an unscoped token with password creds."""
body_dict = _build_user_auth(username='FOO',
password='foo2')
unscoped_token = self.controller.authenticate({}, body_dict)
self.assertNotIn('tenant', unscoped_token['access']['token'])
def test_auth_invalid_token(self):
"""Verify exception is raised if invalid token."""
body_dict = _build_user_auth(token={"id": uuid.uuid4().hex})
self.assertRaises(
exception.Unauthorized,
self.controller.authenticate,
{}, body_dict)
def test_auth_bad_formatted_token(self):
"""Verify exception is raised if invalid token."""
body_dict = _build_user_auth(token={})
self.assertRaises(
exception.ValidationError,
self.controller.authenticate,
{}, body_dict)
def test_auth_unscoped_token_no_project(self):
"""Verify getting an unscoped token with an unscoped token."""
body_dict = _build_user_auth(
username='FOO',
password='foo2')
unscoped_token = self.controller.authenticate({}, body_dict)
body_dict = _build_user_auth(
token=unscoped_token["access"]["token"])
unscoped_token_2 = self.controller.authenticate({}, body_dict)
self.assertEqualTokens(unscoped_token, unscoped_token_2)
def test_auth_unscoped_token_project(self):
"""Verify getting a token in a tenant with an unscoped token."""
# Add a role in so we can check we get this back
self.assignment_api.add_role_to_user_and_project(
self.user_foo['id'],
self.tenant_bar['id'],
self.role_member['id'])
        # Get an unscoped token
body_dict = _build_user_auth(
username='FOO',
password='foo2')
unscoped_token = self.controller.authenticate({}, body_dict)
        # Get a token on BAR tenant using the unscoped token
body_dict = _build_user_auth(
token=unscoped_token["access"]["token"],
tenant_name="BAR")
scoped_token = self.controller.authenticate({}, body_dict)
tenant = scoped_token["access"]["token"]["tenant"]
roles = scoped_token["access"]["metadata"]["roles"]
self.assertEqual(self.tenant_bar['id'], tenant["id"])
self.assertEqual(self.role_member['id'], roles[0])
def test_auth_token_project_group_role(self):
"""Verify getting a token in a tenant with group roles."""
# Add a v2 style role in so we can check we get this back
self.assignment_api.add_role_to_user_and_project(
self.user_foo['id'],
self.tenant_bar['id'],
self.role_member['id'])
# Now create a group role for this user as well
domain1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
self.assignment_api.create_domain(domain1['id'], domain1)
new_group = {'id': uuid.uuid4().hex, 'domain_id': domain1['id'],
'name': uuid.uuid4().hex}
self.identity_api.create_group(new_group['id'], new_group)
self.identity_api.add_user_to_group(self.user_foo['id'],
new_group['id'])
self.assignment_api.create_grant(
group_id=new_group['id'],
project_id=self.tenant_bar['id'],
role_id=self.role_admin['id'])
# Get a scoped token for the tenant
body_dict = _build_user_auth(
username='FOO',
password='foo2',
tenant_name="BAR")
scoped_token = self.controller.authenticate({}, body_dict)
tenant = scoped_token["access"]["token"]["tenant"]
roles = scoped_token["access"]["metadata"]["roles"]
self.assertEqual(self.tenant_bar['id'], tenant["id"])
self.assertIn(self.role_member['id'], roles)
self.assertIn(self.role_admin['id'], roles)
def test_auth_token_cross_domain_group_and_project(self):
"""Verify getting a token in cross domain group/project roles."""
# create domain, project and group and grant roles to user
domain1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
self.assignment_api.create_domain(domain1['id'], domain1)
project1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
'domain_id': domain1['id']}
self.assignment_api.create_project(project1['id'], project1)
role_foo_domain1 = {'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex}
self.assignment_api.create_role(role_foo_domain1['id'],
role_foo_domain1)
role_group_domain1 = {'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex}
self.assignment_api.create_role(role_group_domain1['id'],
role_group_domain1)
self.assignment_api.add_user_to_project(project1['id'],
self.user_foo['id'])
new_group = {'id': uuid.uuid4().hex, 'domain_id': domain1['id'],
'name': uuid.uuid4().hex}
self.identity_api.create_group(new_group['id'], new_group)
self.identity_api.add_user_to_group(self.user_foo['id'],
new_group['id'])
self.assignment_api.create_grant(
user_id=self.user_foo['id'],
project_id=project1['id'],
role_id=self.role_member['id'])
self.assignment_api.create_grant(
group_id=new_group['id'],
project_id=project1['id'],
role_id=self.role_admin['id'])
self.assignment_api.create_grant(
user_id=self.user_foo['id'],
domain_id=domain1['id'],
role_id=role_foo_domain1['id'])
self.assignment_api.create_grant(
group_id=new_group['id'],
domain_id=domain1['id'],
role_id=role_group_domain1['id'])
# Get a scoped token for the tenant
body_dict = _build_user_auth(
username=self.user_foo['name'],
password=self.user_foo['password'],
tenant_name=project1['name'])
scoped_token = self.controller.authenticate({}, body_dict)
tenant = scoped_token["access"]["token"]["tenant"]
roles = scoped_token["access"]["metadata"]["roles"]
self.assertEqual(project1['id'], tenant["id"])
self.assertIn(self.role_member['id'], roles)
self.assertIn(self.role_admin['id'], roles)
self.assertNotIn(role_foo_domain1['id'], roles)
self.assertNotIn(role_group_domain1['id'], roles)
def test_belongs_to_no_tenant(self):
r = self.controller.authenticate(
{},
auth={
'passwordCredentials': {
'username': self.user_foo['name'],
'password': self.user_foo['password']
}
})
unscoped_token_id = r['access']['token']['id']
self.assertRaises(
exception.Unauthorized,
self.controller.validate_token,
dict(is_admin=True, query_string={'belongsTo': 'BAR'}),
token_id=unscoped_token_id)
def test_belongs_to(self):
body_dict = _build_user_auth(
username='FOO',
password='foo2',
tenant_name="BAR")
scoped_token = self.controller.authenticate({}, body_dict)
scoped_token_id = scoped_token['access']['token']['id']
self.assertRaises(
exception.Unauthorized,
self.controller.validate_token,
dict(is_admin=True, query_string={'belongsTo': 'me'}),
token_id=scoped_token_id)
self.assertRaises(
exception.Unauthorized,
self.controller.validate_token,
dict(is_admin=True, query_string={'belongsTo': 'BAR'}),
token_id=scoped_token_id)
def test_token_auth_with_binding(self):
self.config_fixture.config(group='token', bind=['kerberos'])
body_dict = _build_user_auth()
unscoped_token = self.controller.authenticate(
self.context_with_remote_user, body_dict)
# the token should have bind information in it
bind = unscoped_token['access']['token']['bind']
self.assertEqual('FOO', bind['kerberos'])
body_dict = _build_user_auth(
token=unscoped_token['access']['token'],
tenant_name='BAR')
# using unscoped token without remote user context fails
self.assertRaises(
exception.Unauthorized,
self.controller.authenticate,
self.empty_context, body_dict)
# using token with remote user context succeeds
scoped_token = self.controller.authenticate(
self.context_with_remote_user, body_dict)
# the bind information should be carried over from the original token
bind = scoped_token['access']['token']['bind']
self.assertEqual('FOO', bind['kerberos'])
def test_deleting_role_revokes_token(self):
role_controller = assignment.controllers.Role()
project1 = {'id': 'Project1', 'name': uuid.uuid4().hex,
'domain_id': DEFAULT_DOMAIN_ID}
self.assignment_api.create_project(project1['id'], project1)
role_one = {'id': 'role_one', 'name': uuid.uuid4().hex}
self.assignment_api.create_role(role_one['id'], role_one)
self.assignment_api.add_role_to_user_and_project(
self.user_foo['id'], project1['id'], role_one['id'])
no_context = {}
# Get a scoped token for the tenant
body_dict = _build_user_auth(
username=self.user_foo['name'],
password=self.user_foo['password'],
tenant_name=project1['name'])
token = self.controller.authenticate(no_context, body_dict)
# Ensure it is valid
token_id = token['access']['token']['id']
self.controller.validate_token(
dict(is_admin=True, query_string={}),
token_id=token_id)
# Delete the role, which should invalidate the token
role_controller.delete_role(
dict(is_admin=True, query_string={}), role_one['id'])
# Check the token is now invalid
self.assertRaises(
exception.TokenNotFound,
self.controller.validate_token,
dict(is_admin=True, query_string={}),
token_id=token_id)
class AuthWithPasswordCredentials(AuthTest):
def setUp(self):
super(AuthWithPasswordCredentials, self).setUp()
def test_auth_invalid_user(self):
"""Verify exception is raised if invalid user."""
body_dict = _build_user_auth(
username=uuid.uuid4().hex,
password=uuid.uuid4().hex)
self.assertRaises(
exception.Unauthorized,
self.controller.authenticate,
{}, body_dict)
def test_auth_valid_user_invalid_password(self):
"""Verify exception is raised if invalid password."""
body_dict = _build_user_auth(
username="FOO",
password=uuid.uuid4().hex)
self.assertRaises(
exception.Unauthorized,
self.controller.authenticate,
{}, body_dict)
def test_auth_empty_password(self):
"""Verify exception is raised if empty password."""
body_dict = _build_user_auth(
username="FOO",
password="")
self.assertRaises(
exception.Unauthorized,
self.controller.authenticate,
{}, body_dict)
def test_auth_no_password(self):
"""Verify exception is raised if empty password."""
body_dict = _build_user_auth(username="FOO")
self.assertRaises(
exception.ValidationError,
self.controller.authenticate,
{}, body_dict)
def test_authenticate_blank_password_credentials(self):
"""Sending empty dict as passwordCredentials raises a 400 error."""
body_dict = {'passwordCredentials': {}, 'tenantName': 'demo'}
self.assertRaises(exception.ValidationError,
self.controller.authenticate,
{}, body_dict)
def test_authenticate_no_username(self):
"""Verify skipping username raises the right exception."""
body_dict = _build_user_auth(password="pass",
tenant_name="demo")
self.assertRaises(exception.ValidationError,
self.controller.authenticate,
{}, body_dict)
def test_bind_without_remote_user(self):
self.config_fixture.config(group='token', bind=['kerberos'])
body_dict = _build_user_auth(username='FOO', password='foo2',
tenant_name='BAR')
token = self.controller.authenticate({}, body_dict)
self.assertNotIn('bind', token['access']['token'])
def test_change_default_domain_id(self):
# If the default_domain_id config option is not the default then the
# user in auth data is from the new default domain.
# 1) Create a new domain.
new_domain_id = uuid.uuid4().hex
new_domain = {
'description': uuid.uuid4().hex,
'enabled': True,
'id': new_domain_id,
'name': uuid.uuid4().hex,
}
self.assignment_api.create_domain(new_domain_id, new_domain)
# 2) Create user "foo" in new domain with different password than
# default-domain foo.
new_user_id = uuid.uuid4().hex
new_user_password = uuid.uuid4().hex
new_user = {
'id': new_user_id,
'name': self.user_foo['name'],
'domain_id': new_domain_id,
'password': new_user_password,
'email': '[email protected]',
}
self.identity_api.create_user(new_user_id, new_user)
# 3) Update the default_domain_id config option to the new domain
self.config_fixture.config(group='identity',
default_domain_id=new_domain_id)
# 4) Authenticate as "foo" using the password in the new domain.
body_dict = _build_user_auth(
username=self.user_foo['name'],
password=new_user_password)
# The test is successful if this doesn't raise, so no need to assert.
self.controller.authenticate({}, body_dict)
class AuthWithRemoteUser(AuthTest):
def setUp(self):
super(AuthWithRemoteUser, self).setUp()
def test_unscoped_remote_authn(self):
"""Verify getting an unscoped token with external authn."""
body_dict = _build_user_auth(
username='FOO',
password='foo2')
local_token = self.controller.authenticate(
{}, body_dict)
body_dict = _build_user_auth()
remote_token = self.controller.authenticate(
self.context_with_remote_user, body_dict)
self.assertEqualTokens(local_token, remote_token)
def test_unscoped_remote_authn_jsonless(self):
"""Verify that external auth with invalid request fails."""
self.assertRaises(
exception.ValidationError,
self.controller.authenticate,
{'REMOTE_USER': 'FOO'},
None)
def test_scoped_remote_authn(self):
"""Verify getting a token with external authn."""
body_dict = _build_user_auth(
username='FOO',
password='foo2',
tenant_name='BAR')
local_token = self.controller.authenticate(
{}, body_dict)
body_dict = _build_user_auth(
tenant_name='BAR')
remote_token = self.controller.authenticate(
self.context_with_remote_user, body_dict)
self.assertEqualTokens(local_token, remote_token)
def test_scoped_nometa_remote_authn(self):
"""Verify getting a token with external authn and no metadata."""
body_dict = _build_user_auth(
username='TWO',
password='two2',
tenant_name='BAZ')
local_token = self.controller.authenticate(
{}, body_dict)
body_dict = _build_user_auth(tenant_name='BAZ')
remote_token = self.controller.authenticate(
{'environment': {'REMOTE_USER': 'TWO'}}, body_dict)
self.assertEqualTokens(local_token, remote_token)
def test_scoped_remote_authn_invalid_user(self):
"""Verify that external auth with invalid user fails."""
body_dict = _build_user_auth(tenant_name="BAR")
self.assertRaises(
exception.Unauthorized,
self.controller.authenticate,
{'environment': {'REMOTE_USER': uuid.uuid4().hex}},
body_dict)
def test_bind_with_kerberos(self):
self.config_fixture.config(group='token', bind=['kerberos'])
body_dict = _build_user_auth(tenant_name="BAR")
token = self.controller.authenticate(self.context_with_remote_user,
body_dict)
self.assertEqual('FOO', token['access']['token']['bind']['kerberos'])
def test_bind_without_config_opt(self):
self.config_fixture.config(group='token', bind=['x509'])
body_dict = _build_user_auth(tenant_name='BAR')
token = self.controller.authenticate(self.context_with_remote_user,
body_dict)
self.assertNotIn('bind', token['access']['token'])
class AuthWithTrust(AuthTest):
def setUp(self):
super(AuthWithTrust, self).setUp()
trust.Manager()
self.trust_controller = trust.controllers.TrustV3()
self.auth_v3_controller = auth.controllers.Auth()
self.trustor = self.user_foo
self.trustee = self.user_two
self.assigned_roles = [self.role_member['id'],
self.role_browser['id']]
for assigned_role in self.assigned_roles:
self.assignment_api.add_role_to_user_and_project(
self.trustor['id'], self.tenant_bar['id'], assigned_role)
self.sample_data = {'trustor_user_id': self.trustor['id'],
'trustee_user_id': self.trustee['id'],
'project_id': self.tenant_bar['id'],
'impersonation': True,
'roles': [{'id': self.role_browser['id']},
{'name': self.role_member['name']}]}
expires_at = timeutils.strtime(timeutils.utcnow() +
datetime.timedelta(minutes=10),
fmt=TIME_FORMAT)
self.create_trust(expires_at=expires_at)
def config_overrides(self):
super(AuthWithTrust, self).config_overrides()
self.config_fixture.config(group='trust', enabled=True)
def _create_auth_context(self, token_id):
token_ref = self.token_api.get_token(token_id)
auth_context = authorization.token_to_auth_context(
token_ref['token_data'])
return {'environment': {authorization.AUTH_CONTEXT_ENV: auth_context},
'token_id': token_id,
'host_url': HOST_URL}
def create_trust(self, expires_at=None, impersonation=True):
username = self.trustor['name']
password = 'foo2'
body_dict = _build_user_auth(username=username, password=password)
self.unscoped_token = self.controller.authenticate({}, body_dict)
context = self._create_auth_context(
self.unscoped_token['access']['token']['id'])
trust_data = copy.deepcopy(self.sample_data)
trust_data['expires_at'] = expires_at
trust_data['impersonation'] = impersonation
self.new_trust = self.trust_controller.create_trust(
context, trust=trust_data)['trust']
def build_v2_token_request(self, username, password):
body_dict = _build_user_auth(username=username, password=password)
self.unscoped_token = self.controller.authenticate({}, body_dict)
unscoped_token_id = self.unscoped_token['access']['token']['id']
request_body = _build_user_auth(token={'id': unscoped_token_id},
trust_id=self.new_trust['id'],
tenant_id=self.tenant_bar['id'])
return request_body
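    # Editor's note (added comment): the body built above pairs an unscoped
    # token with a trust_id and tenantId, which is the request shape the v2
    # API expects when exchanging a token for a trust-scoped one.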
def test_create_trust_bad_data_fails(self):
context = self._create_auth_context(
self.unscoped_token['access']['token']['id'])
bad_sample_data = {'trustor_user_id': self.trustor['id'],
'project_id': self.tenant_bar['id'],
'roles': [{'id': self.role_browser['id']}]}
self.assertRaises(exception.ValidationError,
self.trust_controller.create_trust,
context, trust=bad_sample_data)
def test_create_trust_no_roles(self):
context = {'token_id': self.unscoped_token['access']['token']['id']}
self.sample_data['roles'] = []
self.assertRaises(exception.Forbidden,
self.trust_controller.create_trust,
context, trust=self.sample_data)
def test_create_trust(self):
self.assertEqual(self.trustor['id'], self.new_trust['trustor_user_id'])
self.assertEqual(self.trustee['id'], self.new_trust['trustee_user_id'])
role_ids = [self.role_browser['id'], self.role_member['id']]
self.assertTrue(timeutils.parse_strtime(self.new_trust['expires_at'],
fmt=TIME_FORMAT))
self.assertIn('%s/v3/OS-TRUST/' % HOST_URL,
self.new_trust['links']['self'])
self.assertIn('%s/v3/OS-TRUST/' % HOST_URL,
self.new_trust['roles_links']['self'])
for role in self.new_trust['roles']:
self.assertIn(role['id'], role_ids)
def test_create_trust_expires_bad(self):
self.assertRaises(exception.ValidationTimeStampError,
self.create_trust,
expires_at="bad")
self.assertRaises(exception.ValidationTimeStampError,
self.create_trust,
expires_at="")
self.assertRaises(exception.ValidationTimeStampError,
self.create_trust,
expires_at="Z")
def test_get_trust(self):
context = {'token_id': self.unscoped_token['access']['token']['id'],
'host_url': HOST_URL}
trust = self.trust_controller.get_trust(context,
self.new_trust['id'])['trust']
self.assertEqual(self.trustor['id'], trust['trustor_user_id'])
self.assertEqual(self.trustee['id'], trust['trustee_user_id'])
role_ids = [self.role_browser['id'], self.role_member['id']]
for role in self.new_trust['roles']:
self.assertIn(role['id'], role_ids)
def test_create_trust_no_impersonation(self):
self.create_trust(expires_at=None, impersonation=False)
self.assertEqual(self.trustor['id'], self.new_trust['trustor_user_id'])
self.assertEqual(self.trustee['id'], self.new_trust['trustee_user_id'])
self.assertIs(self.new_trust['impersonation'], False)
auth_response = self.fetch_v2_token_from_trust()
token_user = auth_response['access']['user']
self.assertEqual(token_user['id'], self.new_trust['trustee_user_id'])
# TODO(ayoung): Endpoints
def test_create_trust_impersonation(self):
self.create_trust(expires_at=None)
self.assertEqual(self.trustor['id'], self.new_trust['trustor_user_id'])
self.assertEqual(self.trustee['id'], self.new_trust['trustee_user_id'])
self.assertIs(self.new_trust['impersonation'], True)
auth_response = self.fetch_v2_token_from_trust()
token_user = auth_response['access']['user']
self.assertEqual(token_user['id'], self.new_trust['trustor_user_id'])
def test_token_from_trust_wrong_user_fails(self):
request_body = self.build_v2_token_request('FOO', 'foo2')
self.assertRaises(
exception.Forbidden,
self.controller.authenticate, {}, request_body)
def fetch_v2_token_from_trust(self):
request_body = self.build_v2_token_request('TWO', 'two2')
auth_response = self.controller.authenticate({}, request_body)
return auth_response
def fetch_v3_token_from_trust(self):
v3_password_data = {
'identity': {
"methods": ["password"],
"password": {
"user": {
"id": self.trustee["id"],
"password": self.trustee["password"]}}
},
'scope': {
'project': {
'id': self.tenant_baz['id']}}}
auth_response = (self.auth_v3_controller.authenticate_for_token
({'environment': {},
'query_string': {}},
v3_password_data))
token = auth_response.headers['X-Subject-Token']
v3_req_with_trust = {
"identity": {
"methods": ["token"],
"token": {"id": token}},
"scope": {
"OS-TRUST:trust": {"id": self.new_trust['id']}}}
token_auth_response = (self.auth_v3_controller.authenticate_for_token
({'environment': {},
'query_string': {}},
v3_req_with_trust))
return token_auth_response
def test_create_v3_token_from_trust(self):
auth_response = self.fetch_v3_token_from_trust()
trust_token_user = auth_response.json['token']['user']
self.assertEqual(self.trustor['id'], trust_token_user['id'])
trust_token_trust = auth_response.json['token']['OS-TRUST:trust']
self.assertEqual(trust_token_trust['id'], self.new_trust['id'])
self.assertEqual(self.trustor['id'],
trust_token_trust['trustor_user']['id'])
self.assertEqual(self.trustee['id'],
trust_token_trust['trustee_user']['id'])
trust_token_roles = auth_response.json['token']['roles']
self.assertEqual(2, len(trust_token_roles))
def test_v3_trust_token_get_token_fails(self):
auth_response = self.fetch_v3_token_from_trust()
trust_token = auth_response.headers['X-Subject-Token']
v3_token_data = {'identity': {
'methods': ['token'],
'token': {'id': trust_token}
}}
self.assertRaises(
exception.Forbidden,
self.auth_v3_controller.authenticate_for_token,
{'environment': {},
'query_string': {}}, v3_token_data)
def test_token_from_trust(self):
auth_response = self.fetch_v2_token_from_trust()
self.assertIsNotNone(auth_response)
self.assertEqual(2,
len(auth_response['access']['metadata']['roles']),
"user_foo has three roles, but the token should"
" only get the two roles specified in the trust.")
def assert_token_count_for_trust(self, expected_value):
tokens = self.trust_controller.token_api._list_tokens(
self.trustee['id'], trust_id=self.new_trust['id'])
token_count = len(tokens)
self.assertEqual(expected_value, token_count)
def test_delete_tokens_for_user_invalidates_tokens_from_trust(self):
self.assert_token_count_for_trust(0)
self.fetch_v2_token_from_trust()
self.assert_token_count_for_trust(1)
self.token_api.delete_tokens_for_user(self.trustee['id'])
self.assert_token_count_for_trust(0)
def test_token_from_trust_cant_get_another_token(self):
auth_response = self.fetch_v2_token_from_trust()
trust_token_id = auth_response['access']['token']['id']
request_body = _build_user_auth(token={'id': trust_token_id},
tenant_id=self.tenant_bar['id'])
self.assertRaises(
exception.Forbidden,
self.controller.authenticate, {}, request_body)
def test_delete_trust_revokes_token(self):
context = self._create_auth_context(
self.unscoped_token['access']['token']['id'])
self.fetch_v2_token_from_trust()
trust_id = self.new_trust['id']
tokens = self.token_api._list_tokens(self.trustor['id'],
trust_id=trust_id)
self.assertEqual(1, len(tokens))
self.trust_controller.delete_trust(context, trust_id=trust_id)
tokens = self.token_api._list_tokens(self.trustor['id'],
trust_id=trust_id)
self.assertEqual(0, len(tokens))
def test_token_from_trust_with_no_role_fails(self):
for assigned_role in self.assigned_roles:
self.assignment_api.remove_role_from_user_and_project(
self.trustor['id'], self.tenant_bar['id'], assigned_role)
request_body = self.build_v2_token_request('TWO', 'two2')
self.assertRaises(
exception.Forbidden,
self.controller.authenticate, {}, request_body)
def test_expired_trust_get_token_fails(self):
expiry = "1999-02-18T10:10:00Z"
self.create_trust(expiry)
request_body = self.build_v2_token_request('TWO', 'two2')
self.assertRaises(
exception.Forbidden,
self.controller.authenticate, {}, request_body)
def test_token_from_trust_with_wrong_role_fails(self):
self.assignment_api.add_role_to_user_and_project(
self.trustor['id'],
self.tenant_bar['id'],
self.role_other['id'])
for assigned_role in self.assigned_roles:
self.assignment_api.remove_role_from_user_and_project(
self.trustor['id'], self.tenant_bar['id'], assigned_role)
request_body = self.build_v2_token_request('TWO', 'two2')
self.assertRaises(
exception.Forbidden,
self.controller.authenticate, {}, request_body)
class TokenExpirationTest(AuthTest):
@mock.patch.object(timeutils, 'utcnow')
def _maintain_token_expiration(self, mock_utcnow):
"""Token expiration should be maintained after re-auth & validation."""
now = datetime.datetime.utcnow()
mock_utcnow.return_value = now
r = self.controller.authenticate(
{},
auth={
'passwordCredentials': {
'username': self.user_foo['name'],
'password': self.user_foo['password']
}
})
unscoped_token_id = r['access']['token']['id']
original_expiration = r['access']['token']['expires']
mock_utcnow.return_value = now + datetime.timedelta(seconds=1)
r = self.controller.validate_token(
dict(is_admin=True, query_string={}),
token_id=unscoped_token_id)
self.assertEqual(original_expiration, r['access']['token']['expires'])
mock_utcnow.return_value = now + datetime.timedelta(seconds=2)
r = self.controller.authenticate(
{},
auth={
'token': {
'id': unscoped_token_id,
},
'tenantId': self.tenant_bar['id'],
})
scoped_token_id = r['access']['token']['id']
self.assertEqual(original_expiration, r['access']['token']['expires'])
mock_utcnow.return_value = now + datetime.timedelta(seconds=3)
r = self.controller.validate_token(
dict(is_admin=True, query_string={}),
token_id=scoped_token_id)
self.assertEqual(original_expiration, r['access']['token']['expires'])
def test_maintain_uuid_token_expiration(self):
self.config_fixture.config(group='signing', token_format='UUID')
self._maintain_token_expiration()
class AuthCatalog(tests.SQLDriverOverrides, AuthTest):
"""Tests for the catalog provided in the auth response."""
def config_files(self):
config_files = super(AuthCatalog, self).config_files()
# We need to use a backend that supports disabled endpoints, like the
# SQL backend.
config_files.append(tests.dirs.tests_conf('backend_sql.conf'))
return config_files
def _create_endpoints(self):
def create_endpoint(service_id, region, **kwargs):
id_ = uuid.uuid4().hex
ref = {
'id': id_,
'interface': 'public',
'region': region,
'service_id': service_id,
'url': 'http://localhost/%s' % uuid.uuid4().hex,
}
ref.update(kwargs)
self.catalog_api.create_endpoint(id_, ref)
return ref
# Create a service for use with the endpoints.
def create_service(**kwargs):
id_ = uuid.uuid4().hex
ref = {
'id': id_,
'name': uuid.uuid4().hex,
'type': uuid.uuid4().hex,
}
ref.update(kwargs)
self.catalog_api.create_service(id_, ref)
return ref
enabled_service_ref = create_service(enabled=True)
disabled_service_ref = create_service(enabled=False)
region = uuid.uuid4().hex
# Create endpoints
enabled_endpoint_ref = create_endpoint(
enabled_service_ref['id'], region)
create_endpoint(
enabled_service_ref['id'], region, enabled=False,
interface='internal')
create_endpoint(
disabled_service_ref['id'], region)
return enabled_endpoint_ref
def test_auth_catalog_disabled_endpoint(self):
"""On authenticate, get a catalog that excludes disabled endpoints."""
endpoint_ref = self._create_endpoints()
# Authenticate
body_dict = _build_user_auth(
username='FOO',
password='foo2',
tenant_name="BAR")
token = self.controller.authenticate({}, body_dict)
# Check the catalog
self.assertEqual(1, len(token['access']['serviceCatalog']))
endpoint = token['access']['serviceCatalog'][0]['endpoints'][0]
self.assertEqual(
1, len(token['access']['serviceCatalog'][0]['endpoints']))
exp_endpoint = {
'id': endpoint_ref['id'],
'publicURL': endpoint_ref['url'],
'region': endpoint_ref['region'],
}
self.assertEqual(exp_endpoint, endpoint)
def test_validate_catalog_disabled_endpoint(self):
"""On validate, get back a catalog that excludes disabled endpoints."""
endpoint_ref = self._create_endpoints()
# Authenticate
body_dict = _build_user_auth(
username='FOO',
password='foo2',
tenant_name="BAR")
token = self.controller.authenticate({}, body_dict)
# Validate
token_id = token['access']['token']['id']
validate_ref = self.controller.validate_token(
dict(is_admin=True, query_string={}),
token_id=token_id)
# Check the catalog
self.assertEqual(1, len(token['access']['serviceCatalog']))
endpoint = validate_ref['access']['serviceCatalog'][0]['endpoints'][0]
self.assertEqual(
1, len(token['access']['serviceCatalog'][0]['endpoints']))
exp_endpoint = {
'id': endpoint_ref['id'],
'publicURL': endpoint_ref['url'],
'region': endpoint_ref['region'],
}
self.assertEqual(exp_endpoint, endpoint)
class NonDefaultAuthTest(tests.TestCase):
def test_add_non_default_auth_method(self):
self.config_fixture.config(group='auth',
methods=['password', 'token', 'custom'])
config.setup_authentication()
self.assertTrue(hasattr(CONF.auth, 'custom'))
| apache-2.0 |
Xeralux/tensorflow | tensorflow/contrib/slim/python/slim/nets/inception_v2.py | 96 | 26999 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the definition for inception v2 classification network."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import layers
from tensorflow.contrib.framework.python.ops import arg_scope
from tensorflow.contrib.layers.python.layers import initializers
from tensorflow.contrib.layers.python.layers import layers as layers_lib
from tensorflow.contrib.layers.python.layers import regularizers
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope
trunc_normal = lambda stddev: init_ops.truncated_normal_initializer(0.0, stddev)
def inception_v2_base(inputs,
final_endpoint='Mixed_5c',
min_depth=16,
depth_multiplier=1.0,
scope=None):
"""Inception v2 (6a2).
Constructs an Inception v2 network from inputs to the given final endpoint.
This method can construct the network up to the layer inception(5b) as
described in http://arxiv.org/abs/1502.03167.
Args:
inputs: a tensor of shape [batch_size, height, width, channels].
final_endpoint: specifies the endpoint to construct the network up to. It
can be one of ['Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1',
'Conv2d_2c_3x3', 'MaxPool_3a_3x3', 'Mixed_3b', 'Mixed_3c', 'Mixed_4a',
'Mixed_4b', 'Mixed_4c', 'Mixed_4d', 'Mixed_4e', 'Mixed_5a', 'Mixed_5b',
'Mixed_5c'].
min_depth: Minimum depth value (number of channels) for all convolution ops.
Enforced when depth_multiplier < 1, and not an active constraint when
depth_multiplier >= 1.
depth_multiplier: Float multiplier for the depth (number of channels)
for all convolution ops. The value must be greater than zero. Typical
usage will be to set this value in (0, 1) to reduce the number of
parameters or computation cost of the model.
scope: Optional variable_scope.
Returns:
tensor_out: output tensor corresponding to the final_endpoint.
end_points: a set of activations for external use, for example summaries or
losses.
Raises:
ValueError: if final_endpoint is not set to one of the predefined values,
or depth_multiplier <= 0
"""
# end_points will collect relevant activations for external use, for example
# summaries or losses.
end_points = {}
# Used to find thinned depths for each layer.
if depth_multiplier <= 0:
raise ValueError('depth_multiplier is not greater than zero.')
depth = lambda d: max(int(d * depth_multiplier), min_depth)
with variable_scope.variable_scope(scope, 'InceptionV2', [inputs]):
with arg_scope(
[
layers.conv2d, layers_lib.max_pool2d, layers_lib.avg_pool2d,
layers.separable_conv2d
],
stride=1,
padding='SAME'):
# Note that sizes in the comments below assume an input spatial size of
      # 224x224, however, the inputs can be of any size greater than 32x32.
# 224 x 224 x 3
end_point = 'Conv2d_1a_7x7'
# depthwise_multiplier here is different from depth_multiplier.
# depthwise_multiplier determines the output channels of the initial
# depthwise conv (see docs for tf.nn.separable_conv2d), while
# depth_multiplier controls the # channels of the subsequent 1x1
# convolution. Must have
      # in_channels * depthwise_multiplier <= out_channels
# so that the separable convolution is not overparameterized.
depthwise_multiplier = min(int(depth(64) / 3), 8)
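      # Editor's note (added comment): with depth_multiplier=1.0 this gives
      # depth(64) = 64 and depthwise_multiplier = min(64 // 3, 8) = 8, so the
      # 3-channel input produces 3 * 8 = 24 depthwise channels, comfortably
      # under the 64 pointwise output channels required by the constraint.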
net = layers.separable_conv2d(
inputs,
depth(64), [7, 7],
depth_multiplier=depthwise_multiplier,
stride=2,
weights_initializer=trunc_normal(1.0),
scope=end_point)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# 112 x 112 x 64
end_point = 'MaxPool_2a_3x3'
net = layers_lib.max_pool2d(net, [3, 3], scope=end_point, stride=2)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# 56 x 56 x 64
end_point = 'Conv2d_2b_1x1'
net = layers.conv2d(
net,
depth(64), [1, 1],
scope=end_point,
weights_initializer=trunc_normal(0.1))
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# 56 x 56 x 64
end_point = 'Conv2d_2c_3x3'
net = layers.conv2d(net, depth(192), [3, 3], scope=end_point)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# 56 x 56 x 192
end_point = 'MaxPool_3a_3x3'
net = layers_lib.max_pool2d(net, [3, 3], scope=end_point, stride=2)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# 28 x 28 x 192
# Inception module.
end_point = 'Mixed_3b'
with variable_scope.variable_scope(end_point):
with variable_scope.variable_scope('Branch_0'):
branch_0 = layers.conv2d(
net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
with variable_scope.variable_scope('Branch_1'):
branch_1 = layers.conv2d(
net,
depth(64), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_1 = layers.conv2d(
branch_1, depth(64), [3, 3], scope='Conv2d_0b_3x3')
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers.conv2d(
net,
depth(64), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_2 = layers.conv2d(
branch_2, depth(96), [3, 3], scope='Conv2d_0b_3x3')
branch_2 = layers.conv2d(
branch_2, depth(96), [3, 3], scope='Conv2d_0c_3x3')
with variable_scope.variable_scope('Branch_3'):
branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3,
depth(32), [1, 1],
weights_initializer=trunc_normal(0.1),
scope='Conv2d_0b_1x1')
net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# 28 x 28 x 256
end_point = 'Mixed_3c'
with variable_scope.variable_scope(end_point):
with variable_scope.variable_scope('Branch_0'):
branch_0 = layers.conv2d(
net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
with variable_scope.variable_scope('Branch_1'):
branch_1 = layers.conv2d(
net,
depth(64), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_1 = layers.conv2d(
branch_1, depth(96), [3, 3], scope='Conv2d_0b_3x3')
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers.conv2d(
net,
depth(64), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_2 = layers.conv2d(
branch_2, depth(96), [3, 3], scope='Conv2d_0b_3x3')
branch_2 = layers.conv2d(
branch_2, depth(96), [3, 3], scope='Conv2d_0c_3x3')
with variable_scope.variable_scope('Branch_3'):
branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3,
depth(64), [1, 1],
weights_initializer=trunc_normal(0.1),
scope='Conv2d_0b_1x1')
net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# 28 x 28 x 320
end_point = 'Mixed_4a'
with variable_scope.variable_scope(end_point):
with variable_scope.variable_scope('Branch_0'):
branch_0 = layers.conv2d(
net,
depth(128), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_0 = layers.conv2d(
branch_0, depth(160), [3, 3], stride=2, scope='Conv2d_1a_3x3')
with variable_scope.variable_scope('Branch_1'):
branch_1 = layers.conv2d(
net,
depth(64), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_1 = layers.conv2d(
branch_1, depth(96), [3, 3], scope='Conv2d_0b_3x3')
branch_1 = layers.conv2d(
branch_1, depth(96), [3, 3], stride=2, scope='Conv2d_1a_3x3')
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers_lib.max_pool2d(
net, [3, 3], stride=2, scope='MaxPool_1a_3x3')
net = array_ops.concat([branch_0, branch_1, branch_2], 3)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# 14 x 14 x 576
end_point = 'Mixed_4b'
with variable_scope.variable_scope(end_point):
with variable_scope.variable_scope('Branch_0'):
branch_0 = layers.conv2d(
net, depth(224), [1, 1], scope='Conv2d_0a_1x1')
with variable_scope.variable_scope('Branch_1'):
branch_1 = layers.conv2d(
net,
depth(64), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_1 = layers.conv2d(
branch_1, depth(96), [3, 3], scope='Conv2d_0b_3x3')
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers.conv2d(
net,
depth(96), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_2 = layers.conv2d(
branch_2, depth(128), [3, 3], scope='Conv2d_0b_3x3')
branch_2 = layers.conv2d(
branch_2, depth(128), [3, 3], scope='Conv2d_0c_3x3')
with variable_scope.variable_scope('Branch_3'):
branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3,
depth(128), [1, 1],
weights_initializer=trunc_normal(0.1),
scope='Conv2d_0b_1x1')
net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# 14 x 14 x 576
end_point = 'Mixed_4c'
with variable_scope.variable_scope(end_point):
with variable_scope.variable_scope('Branch_0'):
branch_0 = layers.conv2d(
net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
with variable_scope.variable_scope('Branch_1'):
branch_1 = layers.conv2d(
net,
depth(96), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_1 = layers.conv2d(
branch_1, depth(128), [3, 3], scope='Conv2d_0b_3x3')
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers.conv2d(
net,
depth(96), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_2 = layers.conv2d(
branch_2, depth(128), [3, 3], scope='Conv2d_0b_3x3')
branch_2 = layers.conv2d(
branch_2, depth(128), [3, 3], scope='Conv2d_0c_3x3')
with variable_scope.variable_scope('Branch_3'):
branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3,
depth(128), [1, 1],
weights_initializer=trunc_normal(0.1),
scope='Conv2d_0b_1x1')
net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# 14 x 14 x 576
end_point = 'Mixed_4d'
with variable_scope.variable_scope(end_point):
with variable_scope.variable_scope('Branch_0'):
branch_0 = layers.conv2d(
net, depth(160), [1, 1], scope='Conv2d_0a_1x1')
with variable_scope.variable_scope('Branch_1'):
branch_1 = layers.conv2d(
net,
depth(128), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_1 = layers.conv2d(
branch_1, depth(160), [3, 3], scope='Conv2d_0b_3x3')
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers.conv2d(
net,
depth(128), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_2 = layers.conv2d(
branch_2, depth(160), [3, 3], scope='Conv2d_0b_3x3')
branch_2 = layers.conv2d(
branch_2, depth(160), [3, 3], scope='Conv2d_0c_3x3')
with variable_scope.variable_scope('Branch_3'):
branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3,
depth(96), [1, 1],
weights_initializer=trunc_normal(0.1),
scope='Conv2d_0b_1x1')
net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# 14 x 14 x 576
end_point = 'Mixed_4e'
with variable_scope.variable_scope(end_point):
with variable_scope.variable_scope('Branch_0'):
branch_0 = layers.conv2d(
net, depth(96), [1, 1], scope='Conv2d_0a_1x1')
with variable_scope.variable_scope('Branch_1'):
branch_1 = layers.conv2d(
net,
depth(128), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_1 = layers.conv2d(
branch_1, depth(192), [3, 3], scope='Conv2d_0b_3x3')
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers.conv2d(
net,
depth(160), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_2 = layers.conv2d(
branch_2, depth(192), [3, 3], scope='Conv2d_0b_3x3')
branch_2 = layers.conv2d(
branch_2, depth(192), [3, 3], scope='Conv2d_0c_3x3')
with variable_scope.variable_scope('Branch_3'):
branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3,
depth(96), [1, 1],
weights_initializer=trunc_normal(0.1),
scope='Conv2d_0b_1x1')
net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# 14 x 14 x 576
end_point = 'Mixed_5a'
with variable_scope.variable_scope(end_point):
with variable_scope.variable_scope('Branch_0'):
branch_0 = layers.conv2d(
net,
depth(128), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_0 = layers.conv2d(
branch_0, depth(192), [3, 3], stride=2, scope='Conv2d_1a_3x3')
with variable_scope.variable_scope('Branch_1'):
branch_1 = layers.conv2d(
net,
depth(192), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_1 = layers.conv2d(
branch_1, depth(256), [3, 3], scope='Conv2d_0b_3x3')
branch_1 = layers.conv2d(
branch_1, depth(256), [3, 3], stride=2, scope='Conv2d_1a_3x3')
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers_lib.max_pool2d(
net, [3, 3], stride=2, scope='MaxPool_1a_3x3')
net = array_ops.concat([branch_0, branch_1, branch_2], 3)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# 7 x 7 x 1024
end_point = 'Mixed_5b'
with variable_scope.variable_scope(end_point):
with variable_scope.variable_scope('Branch_0'):
branch_0 = layers.conv2d(
net, depth(352), [1, 1], scope='Conv2d_0a_1x1')
with variable_scope.variable_scope('Branch_1'):
branch_1 = layers.conv2d(
net,
depth(192), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_1 = layers.conv2d(
branch_1, depth(320), [3, 3], scope='Conv2d_0b_3x3')
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers.conv2d(
net,
depth(160), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_2 = layers.conv2d(
branch_2, depth(224), [3, 3], scope='Conv2d_0b_3x3')
branch_2 = layers.conv2d(
branch_2, depth(224), [3, 3], scope='Conv2d_0c_3x3')
with variable_scope.variable_scope('Branch_3'):
branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3,
depth(128), [1, 1],
weights_initializer=trunc_normal(0.1),
scope='Conv2d_0b_1x1')
net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# 7 x 7 x 1024
end_point = 'Mixed_5c'
with variable_scope.variable_scope(end_point):
with variable_scope.variable_scope('Branch_0'):
branch_0 = layers.conv2d(
net, depth(352), [1, 1], scope='Conv2d_0a_1x1')
with variable_scope.variable_scope('Branch_1'):
branch_1 = layers.conv2d(
net,
depth(192), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_1 = layers.conv2d(
branch_1, depth(320), [3, 3], scope='Conv2d_0b_3x3')
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers.conv2d(
net,
depth(192), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_2 = layers.conv2d(
branch_2, depth(224), [3, 3], scope='Conv2d_0b_3x3')
branch_2 = layers.conv2d(
branch_2, depth(224), [3, 3], scope='Conv2d_0c_3x3')
with variable_scope.variable_scope('Branch_3'):
branch_3 = layers_lib.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3,
depth(128), [1, 1],
weights_initializer=trunc_normal(0.1),
scope='Conv2d_0b_1x1')
net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
raise ValueError('Unknown final endpoint %s' % final_endpoint)
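# Hedged usage sketch for the base network (editor's addition, not part of the
# upstream file); the placeholder shape is an assumption for illustration:
#
#     images = array_ops.placeholder(dtypes.float32, [None, 224, 224, 3])
#     net, end_points = inception_v2_base(images, final_endpoint='Mixed_4c')
#     # net is the Mixed_4c activation; end_points maps every endpoint name
#     # constructed so far to its tensor.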
def inception_v2(inputs,
num_classes=1000,
is_training=True,
dropout_keep_prob=0.8,
min_depth=16,
depth_multiplier=1.0,
prediction_fn=layers_lib.softmax,
spatial_squeeze=True,
reuse=None,
scope='InceptionV2'):
"""Inception v2 model for classification.
Constructs an Inception v2 network for classification as described in
http://arxiv.org/abs/1502.03167.
The recommended image size used to train this network is 224x224. For image
sizes that differ substantially, it is recommended to use inception_v2_base()
and connect custom final layers to the output.
Args:
inputs: a tensor of shape [batch_size, height, width, channels].
num_classes: number of predicted classes.
is_training: whether is training or not.
dropout_keep_prob: the percentage of activation values that are retained.
min_depth: Minimum depth value (number of channels) for all convolution ops.
Enforced when depth_multiplier < 1, and not an active constraint when
depth_multiplier >= 1.
depth_multiplier: Float multiplier for the depth (number of channels)
for all convolution ops. The value must be greater than zero. Typical
usage will be to set this value in (0, 1) to reduce the number of
parameters or computation cost of the model.
prediction_fn: a function to get predictions out of logits.
spatial_squeeze: if True, logits is of shape [B, C], if false logits is
of shape [B, 1, 1, C], where B is batch_size and C is number of classes.
Note that input image sizes other than 224x224 might lead to different
spatial dimensions, and hence cannot be squeezed. In this event,
it is best to set spatial_squeeze as False, and perform a reduce_mean
over the resulting spatial dimensions with sizes exceeding 1.
reuse: whether or not the network and its variables should be reused. To be
able to reuse 'scope' must be given.
scope: Optional variable_scope.
Returns:
logits: the pre-softmax activations, a tensor of size
[batch_size, num_classes]
end_points: a dictionary from components of the network to the corresponding
activation.
Raises:
ValueError: if depth_multiplier <= 0.
"""
if depth_multiplier <= 0:
raise ValueError('depth_multiplier is not greater than zero.')
# Final pooling and prediction
with variable_scope.variable_scope(
scope, 'InceptionV2', [inputs, num_classes], reuse=reuse) as scope:
with arg_scope(
[layers_lib.batch_norm, layers_lib.dropout], is_training=is_training):
net, end_points = inception_v2_base(
inputs,
scope=scope,
min_depth=min_depth,
depth_multiplier=depth_multiplier)
with variable_scope.variable_scope('Logits'):
kernel_size = _reduced_kernel_size_for_small_input(net, [7, 7])
net = layers_lib.avg_pool2d(
net,
kernel_size,
padding='VALID',
scope='AvgPool_1a_{}x{}'.format(*kernel_size))
# 1 x 1 x 1024
net = layers_lib.dropout(
net, keep_prob=dropout_keep_prob, scope='Dropout_1b')
logits = layers.conv2d(
net,
num_classes, [1, 1],
activation_fn=None,
normalizer_fn=None,
scope='Conv2d_1c_1x1')
if spatial_squeeze:
logits = array_ops.squeeze(logits, [1, 2], name='SpatialSqueeze')
end_points['Logits'] = logits
end_points['Predictions'] = prediction_fn(logits, scope='Predictions')
return logits, end_points
inception_v2.default_image_size = 224
def _reduced_kernel_size_for_small_input(input_tensor, kernel_size):
"""Define kernel size which is automatically reduced for small input.
If the shape of the input images is unknown at graph construction time, this
function assumes that the input images are large enough.
Args:
input_tensor: input tensor of size [batch_size, height, width, channels].
kernel_size: desired kernel size of length 2: [kernel_height, kernel_width]
Returns:
a tensor with the kernel size.
TODO(jrru): Make this function work with unknown shapes. Theoretically, this
can be done with the code below. Problems are two-fold: (1) If the shape was
known, it will be lost. (2) inception.slim.ops._two_element_tuple cannot
handle tensors that define the kernel size.
shape = tf.shape(input_tensor)
return tf.stack([tf.minimum(shape[1], kernel_size[0]),
tf.minimum(shape[2], kernel_size[1])])
"""
shape = input_tensor.get_shape().as_list()
if shape[1] is None or shape[2] is None:
kernel_size_out = kernel_size
else:
kernel_size_out = [
min(shape[1], kernel_size[0]), min(shape[2], kernel_size[1])
]
return kernel_size_out
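# Illustrative worked example (added for clarity, not part of the original
# source): for a feature map with static shape [batch, 7, 7, 1024] and a
# requested kernel_size of [7, 7] the kernel is returned unchanged, while a
# smaller map of shape [batch, 5, 5, 1024] yields
# [min(5, 7), min(5, 7)] = [5, 5]. If either spatial dimension is None at
# graph construction time, the requested kernel_size is kept as-is.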
def inception_v2_arg_scope(weight_decay=0.00004,
batch_norm_var_collection='moving_vars'):
"""Defines the default InceptionV2 arg scope.
Args:
weight_decay: The weight decay to use for regularizing the model.
batch_norm_var_collection: The name of the collection for the batch norm
variables.
Returns:
An `arg_scope` to use for the inception v2 model.
"""
batch_norm_params = {
# Decay for the moving averages.
'decay': 0.9997,
# epsilon to prevent 0s in variance.
'epsilon': 0.001,
# collection containing update_ops.
'updates_collections': ops.GraphKeys.UPDATE_OPS,
# collection containing the moving mean and moving variance.
'variables_collections': {
'beta': None,
'gamma': None,
'moving_mean': [batch_norm_var_collection],
'moving_variance': [batch_norm_var_collection],
}
}
# Set weight_decay for weights in Conv and FC layers.
with arg_scope(
[layers.conv2d, layers_lib.fully_connected],
weights_regularizer=regularizers.l2_regularizer(weight_decay)):
with arg_scope(
[layers.conv2d],
weights_initializer=initializers.variance_scaling_initializer(),
activation_fn=nn_ops.relu,
normalizer_fn=layers_lib.batch_norm,
normalizer_params=batch_norm_params) as sc:
return sc
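# Minimal usage sketch (illustrative only; not part of the original file, and
# the exact import path of these symbols depends on packaging):
#
#   import tensorflow as tf
#   images = tf.placeholder(tf.float32, [None, 224, 224, 3])
#   with arg_scope(inception_v2_arg_scope(weight_decay=0.00004)):
#       logits, end_points = inception_v2(images, num_classes=1000,
#                                         is_training=False)
#   # logits has shape [batch, 1000] after SpatialSqueeze; end_points maps
#   # names such as 'Logits' and 'Predictions' to their activations.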
| apache-2.0 |
olysonek/tuned | tests/unit/profiles/test_profile.py | 1 | 1691 | import unittest2
import tuned.profiles
import collections
class MockProfile(tuned.profiles.profile.Profile):
def _create_unit(self, name, config):
return (name, config)
class ProfileTestCase(unittest2.TestCase):
def test_init(self):
MockProfile("test", {})
def test_create_units(self):
profile = MockProfile("test", {
"main": { "anything": 10 },
"network" : { "type": "net", "devices": "*" },
"storage" : { "type": "disk" },
})
self.assertIs(type(profile.units), collections.OrderedDict)
self.assertEqual(len(profile.units), 2)
self.assertListEqual(sorted([name_config for name_config in profile.units]), sorted(["network", "storage"]))
def test_create_units_empty(self):
profile = MockProfile("test", {"main":{}})
self.assertIs(type(profile.units), collections.OrderedDict)
self.assertEqual(len(profile.units), 0)
def test_sets_name(self):
profile1 = MockProfile("test_one", {})
profile2 = MockProfile("test_two", {})
self.assertEqual(profile1.name, "test_one")
self.assertEqual(profile2.name, "test_two")
def test_change_name(self):
profile = MockProfile("oldname", {})
self.assertEqual(profile.name, "oldname")
profile.name = "newname"
self.assertEqual(profile.name, "newname")
def test_sets_options(self):
profile = MockProfile("test", {
"main": { "anything": 10 },
"network" : { "type": "net", "devices": "*" },
})
self.assertIs(type(profile.options), dict)
self.assertEqual(profile.options["anything"], 10)
def test_sets_options_empty(self):
profile = MockProfile("test", {
"storage" : { "type": "disk" },
})
self.assertIs(type(profile.options), dict)
self.assertEqual(len(profile.options), 0)
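# Note (added, not part of the original source): MockProfile overrides
# _create_unit so that units are stored as plain (name, config) tuples, which
# lets the assertions above inspect unit creation without constructing real
# tuned unit objects.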
| gpl-2.0 |
Celedhrim/persomov | libs/enzyme/riff.py | 179 | 20109 | # -*- coding: utf-8 -*-
# enzyme - Video metadata parser
# Copyright 2011-2012 Antoine Bertin <[email protected]>
# Copyright 2003-2006 Thomas Schueppel <[email protected]>
# Copyright 2003-2006 Dirk Meyer <[email protected]>
#
# This file is part of enzyme.
#
# enzyme is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# enzyme is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with enzyme. If not, see <http://www.gnu.org/licenses/>.
__all__ = ['Parser']
import os
import struct
import string
import logging
import time
from exceptions import ParseError
import core
# get logging object
log = logging.getLogger(__name__)
# List of tags
# http://kibus1.narod.ru/frames_eng.htm?sof/abcavi/infotags.htm
# http://www.divx-digest.com/software/avitags_dll.html
# File Format: google for odmlff2.pdf
AVIINFO = {
'INAM': 'title',
'IART': 'artist',
'IPRD': 'product',
'ISFT': 'software',
'ICMT': 'comment',
'ILNG': 'language',
'IKEY': 'keywords',
'IPRT': 'trackno',
'IFRM': 'trackof',
'IPRO': 'producer',
'IWRI': 'writer',
'IGNR': 'genre',
'ICOP': 'copyright'
}
# Taken from libavcodec/mpeg4data.h (pixel_aspect struct)
PIXEL_ASPECT = {
1: (1, 1),
2: (12, 11),
3: (10, 11),
4: (16, 11),
5: (40, 33)
}
class Riff(core.AVContainer):
"""
AVI parser also parsing metadata like title, languages, etc.
"""
table_mapping = { 'AVIINFO' : AVIINFO }
def __init__(self, file):
core.AVContainer.__init__(self)
# read the header
h = file.read(12)
if h[:4] != "RIFF" and h[:4] != 'SDSS':
raise ParseError()
self.has_idx = False
self.header = {}
self.junkStart = None
self.infoStart = None
self.type = h[8:12]
if self.type == 'AVI ':
self.mime = 'video/avi'
elif self.type == 'WAVE':
self.mime = 'audio/wav'
try:
while self._parseRIFFChunk(file):
pass
except IOError:
log.exception(u'error in file, stop parsing')
self._find_subtitles(file.name)
if not self.has_idx and isinstance(self, core.AVContainer):
log.debug(u'WARNING: avi has no index')
self._set('corrupt', True)
def _find_subtitles(self, filename):
"""
Search for subtitle files. Right now only VobSub is supported
"""
base = os.path.splitext(filename)[0]
if os.path.isfile(base + '.idx') and \
(os.path.isfile(base + '.sub') or os.path.isfile(base + '.rar')):
file = open(base + '.idx')
if file.readline().find('VobSub index file') > 0:
for line in file.readlines():
if line.find('id') == 0:
sub = core.Subtitle()
sub.language = line[4:6]
sub.trackno = base + '.idx' # Maybe not?
self.subtitles.append(sub)
file.close()
def _parseAVIH(self, t):
retval = {}
v = struct.unpack('<IIIIIIIIIIIIII', t[0:56])
(retval['dwMicroSecPerFrame'],
retval['dwMaxBytesPerSec'],
retval['dwPaddingGranularity'],
retval['dwFlags'],
retval['dwTotalFrames'],
retval['dwInitialFrames'],
retval['dwStreams'],
retval['dwSuggestedBufferSize'],
retval['dwWidth'],
retval['dwHeight'],
retval['dwScale'],
retval['dwRate'],
retval['dwStart'],
retval['dwLength']) = v
if retval['dwMicroSecPerFrame'] == 0:
log.warning(u'ERROR: Corrupt AVI')
raise ParseError()
return retval
def _parseSTRH(self, t):
retval = {}
retval['fccType'] = t[0:4]
log.debug(u'_parseSTRH(%r) : %d bytes' % (retval['fccType'], len(t)))
if retval['fccType'] != 'auds':
retval['fccHandler'] = t[4:8]
v = struct.unpack('<IHHIIIIIIIII', t[8:52])
(retval['dwFlags'],
retval['wPriority'],
retval['wLanguage'],
retval['dwInitialFrames'],
retval['dwScale'],
retval['dwRate'],
retval['dwStart'],
retval['dwLength'],
retval['dwSuggestedBufferSize'],
retval['dwQuality'],
retval['dwSampleSize'],
retval['rcFrame']) = v
else:
try:
v = struct.unpack('<IHHIIIIIIIII', t[8:52])
(retval['dwFlags'],
retval['wPriority'],
retval['wLanguage'],
retval['dwInitialFrames'],
retval['dwScale'],
retval['dwRate'],
retval['dwStart'],
retval['dwLength'],
retval['dwSuggestedBufferSize'],
retval['dwQuality'],
retval['dwSampleSize'],
retval['rcFrame']) = v
self.delay = float(retval['dwStart']) / \
(float(retval['dwRate']) / retval['dwScale'])
except (KeyError, IndexError, ValueError, ZeroDivisionError):
pass
return retval
def _parseSTRF(self, t, strh):
fccType = strh['fccType']
retval = {}
if fccType == 'auds':
v = struct.unpack('<HHHHHH', t[0:12])
(retval['wFormatTag'],
retval['nChannels'],
retval['nSamplesPerSec'],
retval['nAvgBytesPerSec'],
retval['nBlockAlign'],
retval['nBitsPerSample'],
) = v
ai = core.AudioStream()
ai.samplerate = retval['nSamplesPerSec']
ai.channels = retval['nChannels']
# FIXME: Bitrate calculation is completely wrong.
#ai.samplebits = retval['nBitsPerSample']
#ai.bitrate = retval['nAvgBytesPerSec'] * 8
# TODO: set code if possible
# http://www.stats.uwa.edu.au/Internal/Specs/DXALL/FileSpec/\
# Languages
# ai.language = strh['wLanguage']
ai.codec = retval['wFormatTag']
self.audio.append(ai)
elif fccType == 'vids':
v = struct.unpack('<IIIHH', t[0:16])
(retval['biSize'],
retval['biWidth'],
retval['biHeight'],
retval['biPlanes'],
retval['biBitCount']) = v
v = struct.unpack('IIIII', t[20:40])
(retval['biSizeImage'],
retval['biXPelsPerMeter'],
retval['biYPelsPerMeter'],
retval['biClrUsed'],
retval['biClrImportant']) = v
vi = core.VideoStream()
vi.codec = t[16:20]
vi.width = retval['biWidth']
vi.height = retval['biHeight']
# FIXME: Bitrate calculation is completely wrong.
#vi.bitrate = strh['dwRate']
vi.fps = float(strh['dwRate']) / strh['dwScale']
vi.length = strh['dwLength'] / vi.fps
self.video.append(vi)
return retval
def _parseSTRL(self, t):
retval = {}
size = len(t)
i = 0
while i < len(t) - 8:
key = t[i:i + 4]
sz = struct.unpack('<I', t[i + 4:i + 8])[0]
i += 8
value = t[i:]
if key == 'strh':
retval[key] = self._parseSTRH(value)
elif key == 'strf':
retval[key] = self._parseSTRF(value, retval['strh'])
else:
log.debug(u'_parseSTRL: unsupported stream tag %r', key)
i += sz
return retval, i
def _parseODML(self, t):
retval = {}
size = len(t)
i = 0
key = t[i:i + 4]
sz = struct.unpack('<I', t[i + 4:i + 8])[0]
i += 8
value = t[i:]
if key != 'dmlh':
log.debug(u'_parseODML: Error')
i += sz - 8
return (retval, i)
def _parseVPRP(self, t):
retval = {}
v = struct.unpack('<IIIIIIIIII', t[:4 * 10])
(retval['VideoFormat'],
retval['VideoStandard'],
retval['RefreshRate'],
retval['HTotalIn'],
retval['VTotalIn'],
retval['FrameAspectRatio'],
retval['wPixel'],
retval['hPixel']) = v[1:-1]
# I need an avi with more information
# enum {FORMAT_UNKNOWN, FORMAT_PAL_SQUARE, FORMAT_PAL_CCIR_601,
# FORMAT_NTSC_SQUARE, FORMAT_NTSC_CCIR_601,...} VIDEO_FORMAT;
# enum {STANDARD_UNKNOWN, STANDARD_PAL, STANDARD_NTSC, STANDARD_SECAM}
# VIDEO_STANDARD;
#
r = retval['FrameAspectRatio']
r = float(r >> 16) / (r & 0xFFFF)
retval['FrameAspectRatio'] = r
if self.video:
map(lambda v: setattr(v, 'aspect', r), self.video)
return (retval, v[0])
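# Worked example of the 16.16 fixed-point conversion above (illustrative, not
# part of the original source): a FrameAspectRatio word of 0x00040003 decodes
# as (0x00040003 >> 16) / (0x00040003 & 0xFFFF) = 4 / 3.0 ~= 1.33, which is
# then copied onto every already-parsed video stream as its display aspect.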
def _parseLISTmovi(self, size, file):
"""
Digs into movi list, looking for a Video Object Layer header in an
mpeg4 stream in order to determine aspect ratio.
"""
i = 0
n_dc = 0
done = False
# If the VOL header doesn't appear within 5MB or 5 video chunks,
# give up. The 5MB limit is not likely to apply except in
# pathological cases.
while i < min(1024 * 1024 * 5, size - 8) and n_dc < 5:
data = file.read(8)
if ord(data[0]) == 0:
# Eat leading nulls.
data = data[1:] + file.read(1)
i += 1
key, sz = struct.unpack('<4sI', data)
if key[2:] != 'dc' or sz > 1024 * 500:
# This chunk is not video or is unusually big (> 500KB);
# skip it.
file.seek(sz, 1)
i += 8 + sz
continue
n_dc += 1
# Read video chunk into memory
data = file.read(sz)
#for p in range(0,min(80, sz)):
# print "%02x " % ord(data[p]),
#print "\n\n"
# Look through the picture header for VOL startcode. The basic
# logic for this is taken from libavcodec, h263.c
pos = 0
startcode = 0xff
def bits(v, o, n):
# Returns n bits in v, offset o bits.
return (v & 2 ** n - 1 << (64 - n - o)) >> 64 - n - o
while pos < sz:
startcode = ((startcode << 8) | ord(data[pos])) & 0xffffffff
pos += 1
if startcode & 0xFFFFFF00 != 0x100:
# No startcode found yet
continue
if startcode >= 0x120 and startcode <= 0x12F:
# We have the VOL startcode. Pull 64 bits of it and treat
# as a bitstream
v = struct.unpack(">Q", data[pos : pos + 8])[0]
offset = 10
if bits(v, 9, 1):
# is_ol_id, skip over vo_ver_id and vo_priority
offset += 7
ar_info = bits(v, offset, 4)
if ar_info == 15:
# Extended aspect
num = bits(v, offset + 4, 8)
den = bits(v, offset + 12, 8)
else:
# A standard pixel aspect
num, den = PIXEL_ASPECT.get(ar_info, (0, 0))
# num/den indicates pixel aspect; convert to video aspect,
# so we need frame width and height.
if 0 not in [num, den]:
width, height = self.video[-1].width, self.video[-1].height
self.video[-1].aspect = num / float(den) * width / height
done = True
break
startcode = 0xff
i += 8 + len(data)
if done:
# We have the aspect, no need to continue parsing the movi
# list, so break out of the loop.
break
if i < size:
# Seek past whatever might be remaining of the movi list.
file.seek(size - i, 1)
def _parseLIST(self, t):
retval = {}
i = 0
size = len(t)
while i < size - 8:
# skip zero
if ord(t[i]) == 0: i += 1
key = t[i:i + 4]
sz = 0
if key == 'LIST':
sz = struct.unpack('<I', t[i + 4:i + 8])[0]
i += 8
key = "LIST:" + t[i:i + 4]
value = self._parseLIST(t[i:i + sz])
if key == 'strl':
for k in value.keys():
retval[k] = value[k]
else:
retval[key] = value
i += sz
elif key == 'avih':
sz = struct.unpack('<I', t[i + 4:i + 8])[0]
i += 8
value = self._parseAVIH(t[i:i + sz])
i += sz
retval[key] = value
elif key == 'strl':
i += 4
(value, sz) = self._parseSTRL(t[i:])
key = value['strh']['fccType']
i += sz
retval[key] = value
elif key == 'odml':
i += 4
(value, sz) = self._parseODML(t[i:])
i += sz
elif key == 'vprp':
i += 4
(value, sz) = self._parseVPRP(t[i:])
retval[key] = value
i += sz
elif key == 'JUNK':
sz = struct.unpack('<I', t[i + 4:i + 8])[0]
i += sz + 8
else:
sz = struct.unpack('<I', t[i + 4:i + 8])[0]
i += 8
# in most cases this is some info stuff
if not key in AVIINFO.keys() and key != 'IDIT':
log.debug(u'Unknown Key: %r, len: %d' % (key, sz))
value = t[i:i + sz]
if key == 'ISFT':
# product information
if value.find('\0') > 0:
# works for Casio S500 camera videos
value = value[:value.find('\0')]
value = value.replace('\0', '').lstrip().rstrip()
value = value.replace('\0', '').lstrip().rstrip()
if value:
retval[key] = value
if key in ['IDIT', 'ICRD']:
# Timestamp the video was created. Spec says it
# should be a format like "Wed Jan 02 02:03:55 1990"
# Casio S500 uses "2005/12/24/ 14:11", but I've
# also seen "December 24, 2005"
specs = ('%a %b %d %H:%M:%S %Y', '%Y/%m/%d/ %H:%M', '%B %d, %Y')
for tmspec in specs:
try:
tm = time.strptime(value, tmspec)
# save timestamp as int
self.timestamp = int(time.mktime(tm))
break
except ValueError:
pass
else:
log.debug(u'no support for time format %r', value)
i += sz
return retval
def _parseRIFFChunk(self, file):
h = file.read(8)
if len(h) < 8:
return False
name = h[:4]
size = struct.unpack('<I', h[4:8])[0]
if name == 'LIST':
pos = file.tell() - 8
key = file.read(4)
if key == 'movi' and self.video and not self.video[-1].aspect and \
self.video[-1].width and self.video[-1].height and \
self.video[-1].format in ['DIVX', 'XVID', 'FMP4']: # any others?
# If we don't have the aspect (i.e. it isn't in odml vprp
# header), but we do know the video's dimensions, and
# we're dealing with an mpeg4 stream, try to get the aspect
# from the VOL header in the mpeg4 stream.
self._parseLISTmovi(size - 4, file)
return True
elif size > 80000:
log.debug(u'RIFF LIST %r too long to parse: %r bytes' % (key, size))
t = file.seek(size - 4, 1)
return True
elif size < 5:
log.debug(u'RIFF LIST %r too short: %r bytes' % (key, size))
return True
t = file.read(size - 4)
log.debug(u'parse RIFF LIST %r: %d bytes' % (key, size))
value = self._parseLIST(t)
self.header[key] = value
if key == 'INFO':
self.infoStart = pos
self._appendtable('AVIINFO', value)
elif key == 'MID ':
self._appendtable('AVIMID', value)
elif key == 'hdrl':
# no need to add this info to a table
pass
else:
log.debug(u'Skipping table info %r' % key)
elif name == 'JUNK':
self.junkStart = file.tell() - 8
self.junkSize = size
file.seek(size, 1)
elif name == 'idx1':
self.has_idx = True
log.debug(u'idx1: %r bytes' % size)
# no need to parse this
t = file.seek(size, 1)
elif name == 'RIFF':
log.debug(u'New RIFF chunk, extended avi [%i]' % size)
type = file.read(4)
if type != 'AVIX':
log.debug(u'Second RIFF chunk is %r, not AVIX, skipping', type)
file.seek(size - 4, 1)
# that's it, no new information should be in AVIX
return False
elif name == 'fmt ' and size <= 50:
# This is a wav file.
data = file.read(size)
fmt = struct.unpack("<HHLLHH", data[:16])
self._set('codec', hex(fmt[0]))
self._set('samplerate', fmt[2])
# fmt[3] is average bytes per second, so we must divide it
# by 125 to get kbits per second
self._set('bitrate', fmt[3] / 125)
# ugly hack: remember original rate in bytes per second
# so that the length can be calculated in next elif block
self._set('byterate', fmt[3])
# Set a dummy fourcc so codec will be resolved in finalize.
self._set('fourcc', 'dummy')
elif name == 'data':
# XXX: this is naive and may not be right. For example if the
# stream is something that supports VBR like mp3, the value
# will be off. The only way to properly deal with this issue
# is to decode part of the stream based on its codec, but
# kaa.metadata doesn't have this capability (yet?)
# ugly hack: use original rate in bytes per second
self._set('length', size / float(self.byterate))
file.seek(size, 1)
elif not name.strip(string.printable + string.whitespace):
# check if name is something useful at all, maybe it is not an
# avi or it is broken
t = file.seek(size, 1)
log.debug(u'Skipping %r [%i]' % (name, size))
else:
# bad avi
log.debug(u'Bad or broken avi')
return False
return True
Parser = Riff
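# Minimal usage sketch (illustrative; assumes this module is importable as
# enzyme.riff and that the target file is a plain AVI -- not part of the
# original source):
#
#   from enzyme import riff
#   f = open('movie.avi', 'rb')
#   info = riff.Parser(f)              # equivalent to riff.Riff(f)
#   print info.mime                    # e.g. 'video/avi'
#   for v in info.video:
#       print v.width, v.height, v.fps, v.aspect
#   f.close()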
| gpl-3.0 |
AnselCmy/ARPS | report_crawler/report_crawler/spiders/spiders_001/_B/BNU001.py | 2 | 1386 | # -*- coding:utf-8 -*-
import scrapy
from report_crawler.spiders.__Global_function import get_localtime
from report_crawler.spiders.__Global_variable import now_time, end_time
class BNU001_Spider(scrapy.Spider):
name = 'BNU001'
start_urls = ['http://cist.bnu.edu.cn/tzgg/index.html']
domain = 'http://cist.bnu.edu.cn/tzgg/'
counts = 0
def parse(self, response):
messages = response.xpath("//div[@class='twelve columns alpha']/ul/li")
for i, message in enumerate(messages):
report_name = message.xpath(".//a/@title").extract()[0]
if u"【预告】" not in report_name or u"论坛" in report_name:
continue
report_time = get_localtime(message.xpath("span/text()").extract()[0])
if report_time > end_time:
continue
if report_time < now_time:
return
report_url = self.domain + message.xpath(".//a/@href").extract()[0]
yield scrapy.Request(report_url, callback=self.parse_pages,
meta={'link': report_url, 'number': i + 1, 'publication': report_time})
def parse_pages(self, response):
messages = response.xpath("//div[@class='heading']")
return {'text': messages, 'number': response.meta['number'], 'organizer': u"北京师范大学信息科学与技术学院",
'faculty': self.name, 'link': response.meta['link'], 'publication': response.meta['publication'],
'location': u"华北:北京市"}
| mit |
atgreen/bitcoin | qa/rpc-tests/smartfees.py | 131 | 12419 | #!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test fee estimation code
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
# Construct 2 trivial P2SH's and the ScriptSigs that spend them
# So we can create many many transactions without needing to spend
# time signing.
P2SH_1 = "2MySexEGVzZpRgNQ1JdjdP5bRETznm3roQ2" # P2SH of "OP_1 OP_DROP"
P2SH_2 = "2NBdpwq8Aoo1EEKEXPNrKvr5xQr3M9UfcZA" # P2SH of "OP_2 OP_DROP"
# Associated ScriptSig's to spend satisfy P2SH_1 and P2SH_2
# 4 bytes of OP_TRUE and push 2-byte redeem script of "OP_1 OP_DROP" or "OP_2 OP_DROP"
SCRIPT_SIG = ["0451025175", "0451025275"]
def satoshi_round(amount):
return Decimal(amount).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
def small_txpuzzle_randfee(from_node, conflist, unconflist, amount, min_fee, fee_increment):
'''
Create and send a transaction with a random fee.
The transaction pays to a trivial P2SH script, and assumes that its inputs
are of the same form.
The function takes a list of confirmed outputs and unconfirmed outputs
and attempts to use the confirmed list first for its inputs.
It adds the newly created outputs to the unconfirmed list.
Returns (raw transaction, fee)
'''
# It's best to exponentially distribute our random fees
# because the buckets are exponentially spaced.
# Exponentially distributed from 1-128 * fee_increment
rand_fee = float(fee_increment)*(1.1892**random.randint(0,28))
# Total fee ranges from min_fee to min_fee + 127*fee_increment
fee = min_fee - fee_increment + satoshi_round(rand_fee)
inputs = []
total_in = Decimal("0.00000000")
while total_in <= (amount + fee) and len(conflist) > 0:
t = conflist.pop(0)
total_in += t["amount"]
inputs.append({ "txid" : t["txid"], "vout" : t["vout"]} )
if total_in <= amount + fee:
while total_in <= (amount + fee) and len(unconflist) > 0:
t = unconflist.pop(0)
total_in += t["amount"]
inputs.append({ "txid" : t["txid"], "vout" : t["vout"]} )
if total_in <= amount + fee:
raise RuntimeError("Insufficient funds: need %d, have %d"%(amount+fee, total_in))
outputs = {}
outputs[P2SH_1] = total_in - amount - fee
outputs[P2SH_2] = amount
rawtx = from_node.createrawtransaction(inputs, outputs)
# Createrawtransaction constructs a transaction that is ready to be signed
# These transactions don't need to be signed, but we still have to insert the ScriptSig
# that will satisfy the ScriptPubKey.
completetx = rawtx[0:10]
inputnum = 0
for inp in inputs:
completetx += rawtx[10+82*inputnum:82+82*inputnum]
completetx += SCRIPT_SIG[inp["vout"]]
completetx += rawtx[84+82*inputnum:92+82*inputnum]
inputnum += 1
completetx += rawtx[10+82*inputnum:]
txid = from_node.sendrawtransaction(completetx, True)
unconflist.append({ "txid" : txid, "vout" : 0 , "amount" : total_in - amount - fee})
unconflist.append({ "txid" : txid, "vout" : 1 , "amount" : amount})
return (completetx, fee)
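# Worked example of the fee randomization above (illustrative, not part of the
# original source): 1.1892 is roughly 2**0.25, so rand_fee spans about
# 1x .. 128x fee_increment across random.randint(0, 28), and the resulting
# total fee covers roughly min_fee .. min_fee + 127 * fee_increment, matching
# the in-line comments.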
def split_inputs(from_node, txins, txouts, initial_split = False):
'''
We need to generate a lot of very small inputs so we can generate a ton of transactions
and they will have low priority.
This function takes an input from txins, and creates and sends a transaction
which splits the value into 2 outputs which are appended to txouts.
'''
prevtxout = txins.pop()
inputs = []
outputs = {}
inputs.append({ "txid" : prevtxout["txid"], "vout" : prevtxout["vout"] })
half_change = satoshi_round(prevtxout["amount"]/2)
rem_change = prevtxout["amount"] - half_change - Decimal("0.00001000")
outputs[P2SH_1] = half_change
outputs[P2SH_2] = rem_change
rawtx = from_node.createrawtransaction(inputs, outputs)
# If this is the initial split we actually need to sign the transaction
# Otherwise we just need to insert the proper ScriptSig
if (initial_split) :
completetx = from_node.signrawtransaction(rawtx)["hex"]
else :
completetx = rawtx[0:82] + SCRIPT_SIG[prevtxout["vout"]] + rawtx[84:]
txid = from_node.sendrawtransaction(completetx, True)
txouts.append({ "txid" : txid, "vout" : 0 , "amount" : half_change})
txouts.append({ "txid" : txid, "vout" : 1 , "amount" : rem_change})
def check_estimates(node, fees_seen, max_invalid, print_estimates = True):
'''
This function calls estimatefee and verifies that the estimates
meet certain invariants.
'''
all_estimates = [ node.estimatefee(i) for i in range(1,26) ]
if print_estimates:
print([str(all_estimates[e-1]) for e in [1,2,3,6,15,25]])
delta = 1.0e-6 # account for rounding error
last_e = max(fees_seen)
for e in filter(lambda x: x >= 0, all_estimates):
# Estimates should be within the bounds of what transactions fees actually were:
if float(e)+delta < min(fees_seen) or float(e)-delta > max(fees_seen):
raise AssertionError("Estimated fee (%f) out of range (%f,%f)"
%(float(e), min(fees_seen), max(fees_seen)))
# Estimates should be monotonically decreasing
if float(e)-delta > last_e:
raise AssertionError("Estimated fee (%f) larger than last fee (%f) for lower number of confirms"
%(float(e),float(last_e)))
last_e = e
valid_estimate = False
invalid_estimates = 0
for e in all_estimates:
if e >= 0:
valid_estimate = True
else:
invalid_estimates += 1
# Once we're at a high enough confirmation count that we can give an estimate
# We should have estimates for all higher confirmation counts
if valid_estimate and e < 0:
raise AssertionError("Invalid estimate appears at higher confirm count than valid estimate")
# Check on the expected number of different confirmation counts
# that we might not have valid estimates for
if invalid_estimates > max_invalid:
raise AssertionError("More than (%d) invalid estimates"%(max_invalid))
return all_estimates
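# Illustrative example of the invariants checked above (added, not part of the
# original source): an estimate list such as [-1, -1, 0.00025, 0.0002, ...] is
# acceptable when max_invalid >= 2 and every positive value lies inside the
# range of fees actually paid, whereas a -1 appearing after a positive
# estimate, or estimates that increase with the confirmation target, raise an
# AssertionError.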
class EstimateFeeTest(BitcoinTestFramework):
def setup_network(self):
'''
We'll set up the network to have 3 nodes that all mine with different parameters.
But first we need to use one node to create a lot of small low priority outputs
which we will use to generate our transactions.
'''
self.nodes = []
# Use node0 to mine blocks for input splitting
self.nodes.append(start_node(0, self.options.tmpdir, ["-maxorphantx=1000",
"-relaypriority=0", "-whitelist=127.0.0.1"]))
print("This test is time consuming, please be patient")
print("Splitting inputs to small size so we can generate low priority tx's")
self.txouts = []
self.txouts2 = []
# Split a coinbase into two transaction puzzle outputs
split_inputs(self.nodes[0], self.nodes[0].listunspent(0), self.txouts, True)
# Mine
while (len(self.nodes[0].getrawmempool()) > 0):
self.nodes[0].generate(1)
# Repeatedly split those 2 outputs, doubling twice for each rep
# Use txouts to monitor the available utxo, since these won't be tracked in wallet
reps = 0
while (reps < 5):
#Double txouts to txouts2
while (len(self.txouts)>0):
split_inputs(self.nodes[0], self.txouts, self.txouts2)
while (len(self.nodes[0].getrawmempool()) > 0):
self.nodes[0].generate(1)
#Double txouts2 to txouts
while (len(self.txouts2)>0):
split_inputs(self.nodes[0], self.txouts2, self.txouts)
while (len(self.nodes[0].getrawmempool()) > 0):
self.nodes[0].generate(1)
reps += 1
print("Finished splitting")
# Now we can connect the other nodes, didn't want to connect them earlier
# so the estimates would not be affected by the splitting transactions
# Node1 mines small blocks, but blocks still bigger than the expected
# transaction rate, and allows free transactions.
# NOTE: the CreateNewBlock code starts counting block size at 1,000 bytes,
# (17k is room enough for 110 or so transactions)
self.nodes.append(start_node(1, self.options.tmpdir,
["-blockprioritysize=1500", "-blockmaxsize=18000",
"-maxorphantx=1000", "-relaypriority=0", "-debug=estimatefee"]))
connect_nodes(self.nodes[1], 0)
# Node2 is a stingy miner that
# produces blocks that are too small (room for only 70 or so transactions)
node2args = ["-blockprioritysize=0", "-blockmaxsize=12000", "-maxorphantx=1000", "-relaypriority=0"]
self.nodes.append(start_node(2, self.options.tmpdir, node2args))
connect_nodes(self.nodes[0], 2)
connect_nodes(self.nodes[2], 1)
self.is_network_split = False
self.sync_all()
def transact_and_mine(self, numblocks, mining_node):
min_fee = Decimal("0.00001")
# We will now mine numblocks blocks generating on average 100 transactions between each block
# We shuffle our confirmed txout set before each set of transactions
# small_txpuzzle_randfee will use the transactions that have inputs already in the chain when possible
# resorting to tx's that depend on the mempool when those run out
for i in range(numblocks):
random.shuffle(self.confutxo)
for j in range(random.randrange(100-50,100+50)):
from_index = random.randint(1,2)
(txhex, fee) = small_txpuzzle_randfee(self.nodes[from_index], self.confutxo,
self.memutxo, Decimal("0.005"), min_fee, min_fee)
tx_kbytes = (len(txhex)/2)/1000.0
self.fees_per_kb.append(float(fee)/tx_kbytes)
sync_mempools(self.nodes[0:3],.1)
mined = mining_node.getblock(mining_node.generate(1)[0],True)["tx"]
sync_blocks(self.nodes[0:3],.1)
#update which txouts are confirmed
newmem = []
for utx in self.memutxo:
if utx["txid"] in mined:
self.confutxo.append(utx)
else:
newmem.append(utx)
self.memutxo = newmem
def run_test(self):
self.fees_per_kb = []
self.memutxo = []
self.confutxo = self.txouts # Start with the set of confirmed txouts after splitting
print("Checking estimates for 1/2/3/6/15/25 blocks")
print("Creating transactions and mining them with a huge block size")
# Create transactions and mine 30 big blocks with node 0 such that the mempool is always emptied
self.transact_and_mine(30, self.nodes[0])
check_estimates(self.nodes[1], self.fees_per_kb, 1)
print("Creating transactions and mining them with a block size that can't keep up")
# Create transactions and mine 20 small blocks with node 2, but create txs faster than we can mine
self.transact_and_mine(20, self.nodes[2])
check_estimates(self.nodes[1], self.fees_per_kb, 3)
print("Creating transactions and mining them at a block size that is just big enough")
# Generate transactions while mining 40 more blocks, this time with node1
# which mines blocks with capacity just above the rate that transactions are being created
self.transact_and_mine(40, self.nodes[1])
check_estimates(self.nodes[1], self.fees_per_kb, 2)
# Finish by mining a normal-sized block:
while len(self.nodes[1].getrawmempool()) > 0:
self.nodes[1].generate(1)
sync_blocks(self.nodes[0:3],.1)
print("Final estimates after emptying mempools")
check_estimates(self.nodes[1], self.fees_per_kb, 2)
if __name__ == '__main__':
EstimateFeeTest().main()
| mit |
ddelemeny/calligra | 3rdparty/google-breakpad/src/tools/gyp/test/lib/TestCmd.py | 330 | 52544 | """
TestCmd.py: a testing framework for commands and scripts.
The TestCmd module provides a framework for portable automated testing
of executable commands and scripts (in any language, not just Python),
especially commands and scripts that require file system interaction.
In addition to running tests and evaluating conditions, the TestCmd
module manages and cleans up one or more temporary workspace
directories, and provides methods for creating files and directories in
those workspace directories from in-line data (here-documents), allowing
tests to be completely self-contained.
A TestCmd environment object is created via the usual invocation:
import TestCmd
test = TestCmd.TestCmd()
There are a bunch of keyword arguments available at instantiation:
test = TestCmd.TestCmd(description = 'string',
program = 'program_or_script_to_test',
interpreter = 'script_interpreter',
workdir = 'prefix',
subdir = 'subdir',
verbose = Boolean,
match = default_match_function,
diff = default_diff_function,
combine = Boolean)
There are a bunch of methods that let you do different things:
test.verbose_set(1)
test.description_set('string')
test.program_set('program_or_script_to_test')
test.interpreter_set('script_interpreter')
test.interpreter_set(['script_interpreter', 'arg'])
test.workdir_set('prefix')
test.workdir_set('')
test.workpath('file')
test.workpath('subdir', 'file')
test.subdir('subdir', ...)
test.rmdir('subdir', ...)
test.write('file', "contents\n")
test.write(['subdir', 'file'], "contents\n")
test.read('file')
test.read(['subdir', 'file'])
test.read('file', mode)
test.read(['subdir', 'file'], mode)
test.writable('dir', 1)
test.writable('dir', None)
test.preserve(condition, ...)
test.cleanup(condition)
test.command_args(program = 'program_or_script_to_run',
interpreter = 'script_interpreter',
arguments = 'arguments to pass to program')
test.run(program = 'program_or_script_to_run',
interpreter = 'script_interpreter',
arguments = 'arguments to pass to program',
chdir = 'directory_to_chdir_to',
stdin = 'input to feed to the program\n',
universal_newlines = True)
p = test.start(program = 'program_or_script_to_run',
interpreter = 'script_interpreter',
arguments = 'arguments to pass to program',
universal_newlines = None)
test.finish(self, p)
test.pass_test()
test.pass_test(condition)
test.pass_test(condition, function)
test.fail_test()
test.fail_test(condition)
test.fail_test(condition, function)
test.fail_test(condition, function, skip)
test.no_result()
test.no_result(condition)
test.no_result(condition, function)
test.no_result(condition, function, skip)
test.stdout()
test.stdout(run)
test.stderr()
test.stderr(run)
test.symlink(target, link)
test.banner(string)
test.banner(string, width)
test.diff(actual, expected)
test.match(actual, expected)
test.match_exact("actual 1\nactual 2\n", "expected 1\nexpected 2\n")
test.match_exact(["actual 1\n", "actual 2\n"],
["expected 1\n", "expected 2\n"])
test.match_re("actual 1\nactual 2\n", regex_string)
test.match_re(["actual 1\n", "actual 2\n"], list_of_regexes)
test.match_re_dotall("actual 1\nactual 2\n", regex_string)
test.match_re_dotall(["actual 1\n", "actual 2\n"], list_of_regexes)
test.tempdir()
test.tempdir('temporary-directory')
test.sleep()
test.sleep(seconds)
test.where_is('foo')
test.where_is('foo', 'PATH1:PATH2')
test.where_is('foo', 'PATH1;PATH2', '.suffix3;.suffix4')
test.unlink('file')
test.unlink('subdir', 'file')
The TestCmd module provides pass_test(), fail_test(), and no_result()
unbound functions that report test results for use with the Aegis change
management system. These methods terminate the test immediately,
reporting PASSED, FAILED, or NO RESULT respectively, and exiting with
status 0 (success), 1 or 2 respectively. This allows for a distinction
between an actual failed test and a test that could not be properly
evaluated because of an external condition (such as a full file system
or incorrect permissions).
import TestCmd
TestCmd.pass_test()
TestCmd.pass_test(condition)
TestCmd.pass_test(condition, function)
TestCmd.fail_test()
TestCmd.fail_test(condition)
TestCmd.fail_test(condition, function)
TestCmd.fail_test(condition, function, skip)
TestCmd.no_result()
TestCmd.no_result(condition)
TestCmd.no_result(condition, function)
TestCmd.no_result(condition, function, skip)
The TestCmd module also provides unbound functions that handle matching
in the same way as the match_*() methods described above.
import TestCmd
test = TestCmd.TestCmd(match = TestCmd.match_exact)
test = TestCmd.TestCmd(match = TestCmd.match_re)
test = TestCmd.TestCmd(match = TestCmd.match_re_dotall)
The TestCmd module provides unbound functions that can be used for the
"diff" argument to TestCmd.TestCmd instantiation:
import TestCmd
test = TestCmd.TestCmd(match = TestCmd.match_re,
diff = TestCmd.diff_re)
test = TestCmd.TestCmd(diff = TestCmd.simple_diff)
The "diff" argument can also be used with standard difflib functions:
import difflib
test = TestCmd.TestCmd(diff = difflib.context_diff)
test = TestCmd.TestCmd(diff = difflib.unified_diff)
Lastly, the where_is() method also exists in an unbound function
version.
import TestCmd
TestCmd.where_is('foo')
TestCmd.where_is('foo', 'PATH1:PATH2')
TestCmd.where_is('foo', 'PATH1;PATH2', '.suffix3;.suffix4')
"""
# Copyright 2000-2010 Steven Knight
# This module is free software, and you may redistribute it and/or modify
# it under the same terms as Python itself, so long as this copyright message
# and disclaimer are retained in their original form.
#
# IN NO EVENT SHALL THE AUTHOR BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT,
# SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF
# THIS CODE, EVEN IF THE AUTHOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
#
# THE AUTHOR SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE. THE CODE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS,
# AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE,
# SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
__author__ = "Steven Knight <knight at baldmt dot com>"
__revision__ = "TestCmd.py 0.37.D001 2010/01/11 16:55:50 knight"
__version__ = "0.37"
import errno
import os
import os.path
import re
import shutil
import stat
import string
import sys
import tempfile
import time
import traceback
import types
import UserList
__all__ = [
'diff_re',
'fail_test',
'no_result',
'pass_test',
'match_exact',
'match_re',
'match_re_dotall',
'python_executable',
'TestCmd'
]
try:
import difflib
except ImportError:
__all__.append('simple_diff')
def is_List(e):
return type(e) is types.ListType \
or isinstance(e, UserList.UserList)
try:
from UserString import UserString
except ImportError:
class UserString:
pass
if hasattr(types, 'UnicodeType'):
def is_String(e):
return type(e) is types.StringType \
or type(e) is types.UnicodeType \
or isinstance(e, UserString)
else:
def is_String(e):
return type(e) is types.StringType or isinstance(e, UserString)
tempfile.template = 'testcmd.'
if os.name in ('posix', 'nt'):
tempfile.template = 'testcmd.' + str(os.getpid()) + '.'
else:
tempfile.template = 'testcmd.'
re_space = re.compile('\s')
_Cleanup = []
_chain_to_exitfunc = None
def _clean():
global _Cleanup
cleanlist = filter(None, _Cleanup)
del _Cleanup[:]
cleanlist.reverse()
for test in cleanlist:
test.cleanup()
if _chain_to_exitfunc:
_chain_to_exitfunc()
try:
import atexit
except ImportError:
# TODO(1.5): atexit requires python 2.0, so chain sys.exitfunc
try:
_chain_to_exitfunc = sys.exitfunc
except AttributeError:
pass
sys.exitfunc = _clean
else:
atexit.register(_clean)
try:
zip
except NameError:
def zip(*lists):
result = []
for i in xrange(min(map(len, lists))):
result.append(tuple(map(lambda l, i=i: l[i], lists)))
return result
class Collector:
def __init__(self, top):
self.entries = [top]
def __call__(self, arg, dirname, names):
pathjoin = lambda n, d=dirname: os.path.join(d, n)
self.entries.extend(map(pathjoin, names))
def _caller(tblist, skip):
string = ""
arr = []
for file, line, name, text in tblist:
if file[-10:] == "TestCmd.py":
break
arr = [(file, line, name, text)] + arr
atfrom = "at"
for file, line, name, text in arr[skip:]:
if name in ("?", "<module>"):
name = ""
else:
name = " (" + name + ")"
string = string + ("%s line %d of %s%s\n" % (atfrom, line, file, name))
atfrom = "\tfrom"
return string
def fail_test(self = None, condition = 1, function = None, skip = 0):
"""Cause the test to fail.
By default, the fail_test() method reports that the test FAILED
and exits with a status of 1. If a condition argument is supplied,
the test fails only if the condition is true.
"""
if not condition:
return
if not function is None:
function()
of = ""
desc = ""
sep = " "
if not self is None:
if self.program:
of = " of " + self.program
sep = "\n\t"
if self.description:
desc = " [" + self.description + "]"
sep = "\n\t"
at = _caller(traceback.extract_stack(), skip)
sys.stderr.write("FAILED test" + of + desc + sep + at)
sys.exit(1)
def no_result(self = None, condition = 1, function = None, skip = 0):
"""Causes a test to exit with no valid result.
By default, the no_result() method reports NO RESULT for the test
and exits with a status of 2. If a condition argument is supplied,
the test fails only if the condition is true.
"""
if not condition:
return
if not function is None:
function()
of = ""
desc = ""
sep = " "
if not self is None:
if self.program:
of = " of " + self.program
sep = "\n\t"
if self.description:
desc = " [" + self.description + "]"
sep = "\n\t"
if os.environ.get('TESTCMD_DEBUG_SKIPS'):
at = _caller(traceback.extract_stack(), skip)
sys.stderr.write("NO RESULT for test" + of + desc + sep + at)
else:
sys.stderr.write("NO RESULT\n")
sys.exit(2)
def pass_test(self = None, condition = 1, function = None):
"""Causes a test to pass.
By default, the pass_test() method reports PASSED for the test
and exits with a status of 0. If a condition argument is supplied,
the test passes only if the condition is true.
"""
if not condition:
return
if not function is None:
function()
sys.stderr.write("PASSED\n")
sys.exit(0)
def match_exact(lines = None, matches = None):
"""
"""
if not is_List(lines):
lines = string.split(lines, "\n")
if not is_List(matches):
matches = string.split(matches, "\n")
if len(lines) != len(matches):
return
for i in range(len(lines)):
if lines[i] != matches[i]:
return
return 1
def match_re(lines = None, res = None):
"""
"""
if not is_List(lines):
lines = string.split(lines, "\n")
if not is_List(res):
res = string.split(res, "\n")
if len(lines) != len(res):
return
for i in range(len(lines)):
s = "^" + res[i] + "$"
try:
expr = re.compile(s)
except re.error, e:
msg = "Regular expression error in %s: %s"
raise re.error, msg % (repr(s), e[0])
if not expr.search(lines[i]):
return
return 1
def match_re_dotall(lines = None, res = None):
"""
"""
if not type(lines) is type(""):
lines = string.join(lines, "\n")
if not type(res) is type(""):
res = string.join(res, "\n")
s = "^" + res + "$"
try:
expr = re.compile(s, re.DOTALL)
except re.error, e:
msg = "Regular expression error in %s: %s"
raise re.error, msg % (repr(s), e[0])
if expr.match(lines):
return 1
try:
import difflib
except ImportError:
pass
else:
def simple_diff(a, b, fromfile='', tofile='',
fromfiledate='', tofiledate='', n=3, lineterm='\n'):
"""
A function with the same calling signature as difflib.context_diff
(diff -c) and difflib.unified_diff (diff -u) but which prints
output like the simple, unadorned 'diff" command.
"""
sm = difflib.SequenceMatcher(None, a, b)
def comma(x1, x2):
return x1+1 == x2 and str(x2) or '%s,%s' % (x1+1, x2)
result = []
for op, a1, a2, b1, b2 in sm.get_opcodes():
if op == 'delete':
result.append("%sd%d" % (comma(a1, a2), b1))
result.extend(map(lambda l: '< ' + l, a[a1:a2]))
elif op == 'insert':
result.append("%da%s" % (a1, comma(b1, b2)))
result.extend(map(lambda l: '> ' + l, b[b1:b2]))
elif op == 'replace':
result.append("%sc%s" % (comma(a1, a2), comma(b1, b2)))
result.extend(map(lambda l: '< ' + l, a[a1:a2]))
result.append('---')
result.extend(map(lambda l: '> ' + l, b[b1:b2]))
return result
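# Illustrative example (added, not part of the original source): comparing
# a = ['1\n', '2\n', '3\n'] with b = ['1\n', '3\n'] returns
# ['2d1', '< 2\n'], i.e. classic unadorned "diff" output rather than the
# context/unified formats produced by difflib.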
def diff_re(a, b, fromfile='', tofile='',
fromfiledate='', tofiledate='', n=3, lineterm='\n'):
"""
A simple "diff" of two sets of lines when the expected lines
are regular expressions. This is a really dumb thing that
just compares each line in turn, so it doesn't look for
chunks of matching lines and the like--but at least it lets
you know exactly which line first didn't compare correctly.
"""
result = []
diff = len(a) - len(b)
if diff < 0:
a = a + ['']*(-diff)
elif diff > 0:
b = b + ['']*diff
i = 0
for aline, bline in zip(a, b):
s = "^" + aline + "$"
try:
expr = re.compile(s)
except re.error, e:
msg = "Regular expression error in %s: %s"
raise re.error, msg % (repr(s), e[0])
if not expr.search(bline):
result.append("%sc%s" % (i+1, i+1))
result.append('< ' + repr(a[i]))
result.append('---')
result.append('> ' + repr(b[i]))
i = i+1
return result
if os.name == 'java':
python_executable = os.path.join(sys.prefix, 'jython')
else:
python_executable = sys.executable
if sys.platform == 'win32':
default_sleep_seconds = 2
def where_is(file, path=None, pathext=None):
if path is None:
path = os.environ['PATH']
if is_String(path):
path = string.split(path, os.pathsep)
if pathext is None:
pathext = os.environ['PATHEXT']
if is_String(pathext):
pathext = string.split(pathext, os.pathsep)
for ext in pathext:
if string.lower(ext) == string.lower(file[-len(ext):]):
pathext = ['']
break
for dir in path:
f = os.path.join(dir, file)
for ext in pathext:
fext = f + ext
if os.path.isfile(fext):
return fext
return None
else:
def where_is(file, path=None, pathext=None):
if path is None:
path = os.environ['PATH']
if is_String(path):
path = string.split(path, os.pathsep)
for dir in path:
f = os.path.join(dir, file)
if os.path.isfile(f):
try:
st = os.stat(f)
except OSError:
continue
if stat.S_IMODE(st[stat.ST_MODE]) & 0111:
return f
return None
default_sleep_seconds = 1
try:
import subprocess
except ImportError:
# The subprocess module doesn't exist in this version of Python,
# so we're going to cobble up something that looks just enough
# like its API for our purposes below.
import new
subprocess = new.module('subprocess')
subprocess.PIPE = 'PIPE'
subprocess.STDOUT = 'STDOUT'
subprocess.mswindows = (sys.platform == 'win32')
try:
import popen2
popen2.Popen3
except AttributeError:
class Popen3:
universal_newlines = 1
def __init__(self, command, **kw):
if sys.platform == 'win32' and command[0] == '"':
command = '"' + command + '"'
(stdin, stdout, stderr) = os.popen3(' ' + command)
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
def close_output(self):
self.stdout.close()
self.resultcode = self.stderr.close()
def wait(self):
resultcode = self.resultcode
if os.WIFEXITED(resultcode):
return os.WEXITSTATUS(resultcode)
elif os.WIFSIGNALED(resultcode):
return os.WTERMSIG(resultcode)
else:
return None
else:
try:
popen2.Popen4
except AttributeError:
# A cribbed Popen4 class, with some retrofitted code from
# the Python 1.5 Popen3 class methods to do certain things
# by hand.
class Popen4(popen2.Popen3):
childerr = None
def __init__(self, cmd, bufsize=-1):
p2cread, p2cwrite = os.pipe()
c2pread, c2pwrite = os.pipe()
self.pid = os.fork()
if self.pid == 0:
# Child
os.dup2(p2cread, 0)
os.dup2(c2pwrite, 1)
os.dup2(c2pwrite, 2)
for i in range(3, popen2.MAXFD):
try:
os.close(i)
except: pass
try:
os.execvp(cmd[0], cmd)
finally:
os._exit(1)
# Shouldn't come here, I guess
os._exit(1)
os.close(p2cread)
self.tochild = os.fdopen(p2cwrite, 'w', bufsize)
os.close(c2pwrite)
self.fromchild = os.fdopen(c2pread, 'r', bufsize)
popen2._active.append(self)
popen2.Popen4 = Popen4
class Popen3(popen2.Popen3, popen2.Popen4):
universal_newlines = 1
def __init__(self, command, **kw):
if kw.get('stderr') == 'STDOUT':
apply(popen2.Popen4.__init__, (self, command, 1))
else:
apply(popen2.Popen3.__init__, (self, command, 1))
self.stdin = self.tochild
self.stdout = self.fromchild
self.stderr = self.childerr
def wait(self, *args, **kw):
resultcode = apply(popen2.Popen3.wait, (self,)+args, kw)
if os.WIFEXITED(resultcode):
return os.WEXITSTATUS(resultcode)
elif os.WIFSIGNALED(resultcode):
return os.WTERMSIG(resultcode)
else:
return None
subprocess.Popen = Popen3
# From Josiah Carlson,
# ASPN : Python Cookbook : Module to allow Asynchronous subprocess use on Windows and Posix platforms
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/440554
PIPE = subprocess.PIPE
if subprocess.mswindows:
from win32file import ReadFile, WriteFile
from win32pipe import PeekNamedPipe
import msvcrt
else:
import select
import fcntl
try: fcntl.F_GETFL
except AttributeError: fcntl.F_GETFL = 3
try: fcntl.F_SETFL
except AttributeError: fcntl.F_SETFL = 4
class Popen(subprocess.Popen):
def recv(self, maxsize=None):
return self._recv('stdout', maxsize)
def recv_err(self, maxsize=None):
return self._recv('stderr', maxsize)
def send_recv(self, input='', maxsize=None):
return self.send(input), self.recv(maxsize), self.recv_err(maxsize)
def get_conn_maxsize(self, which, maxsize):
if maxsize is None:
maxsize = 1024
elif maxsize < 1:
maxsize = 1
return getattr(self, which), maxsize
def _close(self, which):
getattr(self, which).close()
setattr(self, which, None)
if subprocess.mswindows:
def send(self, input):
if not self.stdin:
return None
try:
x = msvcrt.get_osfhandle(self.stdin.fileno())
(errCode, written) = WriteFile(x, input)
except ValueError:
return self._close('stdin')
except (subprocess.pywintypes.error, Exception), why:
if why[0] in (109, errno.ESHUTDOWN):
return self._close('stdin')
raise
return written
def _recv(self, which, maxsize):
conn, maxsize = self.get_conn_maxsize(which, maxsize)
if conn is None:
return None
try:
x = msvcrt.get_osfhandle(conn.fileno())
(read, nAvail, nMessage) = PeekNamedPipe(x, 0)
if maxsize < nAvail:
nAvail = maxsize
if nAvail > 0:
(errCode, read) = ReadFile(x, nAvail, None)
except ValueError:
return self._close(which)
except (subprocess.pywintypes.error, Exception), why:
if why[0] in (109, errno.ESHUTDOWN):
return self._close(which)
raise
#if self.universal_newlines:
# read = self._translate_newlines(read)
return read
else:
def send(self, input):
if not self.stdin:
return None
if not select.select([], [self.stdin], [], 0)[1]:
return 0
try:
written = os.write(self.stdin.fileno(), input)
except OSError, why:
if why[0] == errno.EPIPE: #broken pipe
return self._close('stdin')
raise
return written
def _recv(self, which, maxsize):
conn, maxsize = self.get_conn_maxsize(which, maxsize)
if conn is None:
return None
try:
flags = fcntl.fcntl(conn, fcntl.F_GETFL)
except TypeError:
flags = None
else:
if not conn.closed:
fcntl.fcntl(conn, fcntl.F_SETFL, flags| os.O_NONBLOCK)
try:
if not select.select([conn], [], [], 0)[0]:
return ''
r = conn.read(maxsize)
if not r:
return self._close(which)
#if self.universal_newlines:
# r = self._translate_newlines(r)
return r
finally:
if not conn.closed and not flags is None:
fcntl.fcntl(conn, fcntl.F_SETFL, flags)
disconnect_message = "Other end disconnected!"
def recv_some(p, t=.1, e=1, tr=5, stderr=0):
if tr < 1:
tr = 1
x = time.time()+t
y = []
r = ''
pr = p.recv
if stderr:
pr = p.recv_err
while time.time() < x or r:
r = pr()
if r is None:
if e:
raise Exception(disconnect_message)
else:
break
elif r:
y.append(r)
else:
time.sleep(max((x-time.time())/tr, 0))
return ''.join(y)
# TODO(3.0: rewrite to use memoryview()
def send_all(p, data):
while len(data):
sent = p.send(data)
if sent is None:
raise Exception(disconnect_message)
data = buffer(data, sent)
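# Illustrative note (added, not part of the original source): recv_some() and
# send_all() are the non-blocking I/O helpers intended for the Popen subclass
# defined above, roughly:
#   p = Popen([python_executable, '-c', 'print raw_input()'],
#             stdin=PIPE, stdout=PIPE)
#   send_all(p, 'hello\n')
#   out = recv_some(p)   # polls p.recv() for roughly 0.1 seconds by default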
try:
object
except NameError:
class object:
pass
class TestCmd(object):
"""Class TestCmd
"""
def __init__(self, description = None,
program = None,
interpreter = None,
workdir = None,
subdir = None,
verbose = None,
match = None,
diff = None,
combine = 0,
universal_newlines = 1):
self._cwd = os.getcwd()
self.description_set(description)
self.program_set(program)
self.interpreter_set(interpreter)
if verbose is None:
try:
verbose = max( 0, int(os.environ.get('TESTCMD_VERBOSE', 0)) )
except ValueError:
verbose = 0
self.verbose_set(verbose)
self.combine = combine
self.universal_newlines = universal_newlines
if match is not None:
self.match_function = match
else:
self.match_function = match_re
if diff is not None:
self.diff_function = diff
else:
try:
difflib
except NameError:
pass
else:
self.diff_function = simple_diff
#self.diff_function = difflib.context_diff
#self.diff_function = difflib.unified_diff
self._dirlist = []
self._preserve = {'pass_test': 0, 'fail_test': 0, 'no_result': 0}
if os.environ.has_key('PRESERVE') and not os.environ['PRESERVE'] is '':
self._preserve['pass_test'] = os.environ['PRESERVE']
self._preserve['fail_test'] = os.environ['PRESERVE']
self._preserve['no_result'] = os.environ['PRESERVE']
else:
try:
self._preserve['pass_test'] = os.environ['PRESERVE_PASS']
except KeyError:
pass
try:
self._preserve['fail_test'] = os.environ['PRESERVE_FAIL']
except KeyError:
pass
try:
self._preserve['no_result'] = os.environ['PRESERVE_NO_RESULT']
except KeyError:
pass
self._stdout = []
self._stderr = []
self.status = None
self.condition = 'no_result'
self.workdir_set(workdir)
self.subdir(subdir)
def __del__(self):
self.cleanup()
def __repr__(self):
return "%x" % id(self)
banner_char = '='
banner_width = 80
def banner(self, s, width=None):
if width is None:
width = self.banner_width
return s + self.banner_char * (width - len(s))
if os.name == 'posix':
def escape(self, arg):
"escape shell special characters"
slash = '\\'
special = '"$'
arg = string.replace(arg, slash, slash+slash)
for c in special:
arg = string.replace(arg, c, slash+c)
if re_space.search(arg):
arg = '"' + arg + '"'
return arg
else:
# Windows does not allow special characters in file names
# anyway, so no need for an escape function, we will just quote
# the arg.
def escape(self, arg):
if re_space.search(arg):
arg = '"' + arg + '"'
return arg
def canonicalize(self, path):
if is_List(path):
path = apply(os.path.join, tuple(path))
if not os.path.isabs(path):
path = os.path.join(self.workdir, path)
return path
def chmod(self, path, mode):
"""Changes permissions on the specified file or directory
path name."""
path = self.canonicalize(path)
os.chmod(path, mode)
def cleanup(self, condition = None):
"""Removes any temporary working directories for the specified
TestCmd environment. If the environment variable PRESERVE was
set when the TestCmd environment was created, temporary working
directories are not removed. If any of the environment variables
PRESERVE_PASS, PRESERVE_FAIL, or PRESERVE_NO_RESULT were set
when the TestCmd environment was created, then temporary working
directories are not removed if the test passed, failed, or had
no result, respectively. Temporary working directories are also
preserved for conditions specified via the preserve method.
Typically, this method is not called directly, but is used when
the script exits to clean up temporary working directories as
appropriate for the exit status.
"""
if not self._dirlist:
return
os.chdir(self._cwd)
self.workdir = None
if condition is None:
condition = self.condition
if self._preserve[condition]:
for dir in self._dirlist:
print "Preserved directory", dir
else:
list = self._dirlist[:]
list.reverse()
for dir in list:
self.writable(dir, 1)
shutil.rmtree(dir, ignore_errors = 1)
self._dirlist = []
try:
global _Cleanup
_Cleanup.remove(self)
except (AttributeError, ValueError):
pass
def command_args(self, program = None,
interpreter = None,
arguments = None):
if program:
if type(program) == type('') and not os.path.isabs(program):
program = os.path.join(self._cwd, program)
else:
program = self.program
if not interpreter:
interpreter = self.interpreter
if not type(program) in [type([]), type(())]:
program = [program]
cmd = list(program)
if interpreter:
if not type(interpreter) in [type([]), type(())]:
interpreter = [interpreter]
cmd = list(interpreter) + cmd
if arguments:
if type(arguments) == type(''):
arguments = string.split(arguments)
cmd.extend(arguments)
return cmd
def description_set(self, description):
"""Set the description of the functionality being tested.
"""
self.description = description
try:
difflib
except NameError:
def diff(self, a, b, name, *args, **kw):
print self.banner('Expected %s' % name)
print a
print self.banner('Actual %s' % name)
print b
else:
def diff(self, a, b, name, *args, **kw):
print self.banner(name)
args = (a.splitlines(), b.splitlines()) + args
lines = apply(self.diff_function, args, kw)
for l in lines:
print l
def fail_test(self, condition = 1, function = None, skip = 0):
"""Cause the test to fail.
"""
if not condition:
return
self.condition = 'fail_test'
fail_test(self = self,
condition = condition,
function = function,
skip = skip)
def interpreter_set(self, interpreter):
"""Set the program to be used to interpret the program
under test as a script.
"""
self.interpreter = interpreter
def match(self, lines, matches):
"""Compare actual and expected file contents.
"""
return self.match_function(lines, matches)
def match_exact(self, lines, matches):
"""Compare actual and expected file contents.
"""
return match_exact(lines, matches)
def match_re(self, lines, res):
"""Compare actual and expected file contents.
"""
return match_re(lines, res)
def match_re_dotall(self, lines, res):
"""Compare actual and expected file contents.
"""
return match_re_dotall(lines, res)
def no_result(self, condition = 1, function = None, skip = 0):
"""Report that the test could not be run.
"""
if not condition:
return
self.condition = 'no_result'
no_result(self = self,
condition = condition,
function = function,
skip = skip)
def pass_test(self, condition = 1, function = None):
"""Cause the test to pass.
"""
if not condition:
return
self.condition = 'pass_test'
pass_test(self = self, condition = condition, function = function)
def preserve(self, *conditions):
"""Arrange for the temporary working directories for the
specified TestCmd environment to be preserved for one or more
conditions. If no conditions are specified, arranges for
the temporary working directories to be preserved for all
conditions.
"""
if conditions is ():
conditions = ('pass_test', 'fail_test', 'no_result')
for cond in conditions:
self._preserve[cond] = 1
def program_set(self, program):
"""Set the executable program or script to be tested.
"""
if program and not os.path.isabs(program):
program = os.path.join(self._cwd, program)
self.program = program
def read(self, file, mode = 'rb'):
"""Reads and returns the contents of the specified file name.
The file name may be a list, in which case the elements are
concatenated with the os.path.join() method. The file is
assumed to be under the temporary working directory unless it
is an absolute path name. The I/O mode for the file may
be specified; it must begin with an 'r'. The default is
'rb' (binary read).
"""
file = self.canonicalize(file)
if mode[0] != 'r':
raise ValueError, "mode must begin with 'r'"
with open(file, mode) as f:
result = f.read()
return result
def rmdir(self, dir):
"""Removes the specified dir name.
The dir name may be a list, in which case the elements are
concatenated with the os.path.join() method. The dir is
assumed to be under the temporary working directory unless it
is an absolute path name.
The dir must be empty.
"""
dir = self.canonicalize(dir)
os.rmdir(dir)
def start(self, program = None,
interpreter = None,
arguments = None,
universal_newlines = None,
**kw):
"""
Starts a program or script for the test environment.
The specified program will have the original directory
prepended unless it is enclosed in a [list].
"""
cmd = self.command_args(program, interpreter, arguments)
cmd_string = string.join(map(self.escape, cmd), ' ')
if self.verbose:
sys.stderr.write(cmd_string + "\n")
if universal_newlines is None:
universal_newlines = self.universal_newlines
# On Windows, if we make stdin a pipe when we plan to send
# no input, and the test program exits before
# Popen calls msvcrt.open_osfhandle, that call will fail.
# So don't use a pipe for stdin if we don't need one.
stdin = kw.get('stdin', None)
if stdin is not None:
stdin = subprocess.PIPE
combine = kw.get('combine', self.combine)
if combine:
stderr_value = subprocess.STDOUT
else:
stderr_value = subprocess.PIPE
return Popen(cmd,
stdin=stdin,
stdout=subprocess.PIPE,
stderr=stderr_value,
universal_newlines=universal_newlines)
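    # Illustrative sketch (not part of the original module; names are
    # hypothetical): start() only launches the command built by
    # command_args() and returns the Popen object; pairing it with finish()
    # below gives the same effect as run():
    #
    #   p = test.start(program='my_prog', stdin="quit\n")
    #   p.stdin.write("quit\n")
    #   test.finish(p)    # records exit status, stdout and stderr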
def finish(self, popen, **kw):
"""
Finishes and waits for the process being run under control of
the specified popen argument, recording the exit status,
standard output and error output.
"""
popen.stdin.close()
self.status = popen.wait()
if not self.status:
self.status = 0
self._stdout.append(popen.stdout.read())
if popen.stderr:
stderr = popen.stderr.read()
else:
stderr = ''
self._stderr.append(stderr)
def run(self, program = None,
interpreter = None,
arguments = None,
chdir = None,
stdin = None,
universal_newlines = None):
"""Runs a test of the program or script for the test
environment. Standard output and error output are saved for
future retrieval via the stdout() and stderr() methods.
The specified program will have the original directory
prepended unless it is enclosed in a [list].
"""
if chdir:
oldcwd = os.getcwd()
if not os.path.isabs(chdir):
chdir = os.path.join(self.workpath(chdir))
if self.verbose:
sys.stderr.write("chdir(" + chdir + ")\n")
os.chdir(chdir)
p = self.start(program,
interpreter,
arguments,
universal_newlines,
stdin=stdin)
if stdin:
if is_List(stdin):
for line in stdin:
p.stdin.write(line)
else:
p.stdin.write(stdin)
p.stdin.close()
out = p.stdout.read()
if p.stderr is None:
err = ''
else:
err = p.stderr.read()
try:
close_output = p.close_output
except AttributeError:
p.stdout.close()
if not p.stderr is None:
p.stderr.close()
else:
close_output()
self._stdout.append(out)
self._stderr.append(err)
self.status = p.wait()
if not self.status:
self.status = 0
if chdir:
os.chdir(oldcwd)
if self.verbose >= 2:
write = sys.stdout.write
write('============ STATUS: %d\n' % self.status)
out = self.stdout()
if out or self.verbose >= 3:
write('============ BEGIN STDOUT (len=%d):\n' % len(out))
write(out)
write('============ END STDOUT\n')
err = self.stderr()
if err or self.verbose >= 3:
write('============ BEGIN STDERR (len=%d)\n' % len(err))
write(err)
write('============ END STDERR\n')
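    # Illustrative sketch (not part of the original module; program and
    # output are hypothetical): a typical synchronous invocation feeds stdin
    # and then inspects the recorded results:
    #
    #   test.run(arguments='-', stdin="input text\n")
    #   test.fail_test(test.status != 0)
    #   test.fail_test(test.stdout() != "expected output\n")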
def sleep(self, seconds = default_sleep_seconds):
"""Sleeps at least the specified number of seconds. If no
number is specified, sleeps at least the minimum number of
seconds necessary to advance file time stamps on the current
system. Sleeping more seconds is all right.
"""
time.sleep(seconds)
def stderr(self, run = None):
"""Returns the error output from the specified run number.
If there is no specified run number, then returns the error
output of the last run. If the run number is less than zero,
then returns the error output from that many runs back from the
current run.
"""
if not run:
run = len(self._stderr)
elif run < 0:
run = len(self._stderr) + run
run = run - 1
return self._stderr[run]
def stdout(self, run = None):
"""Returns the standard output from the specified run number.
If there is no specified run number, then returns the standard
output of the last run. If the run number is less than zero,
then returns the standard output from that many runs back from
the current run.
"""
if not run:
run = len(self._stdout)
elif run < 0:
run = len(self._stdout) + run
run = run - 1
return self._stdout[run]
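    # Illustrative sketch (not part of the original module): run numbers are
    # 1-based and negative values count back from the most recent run, so
    # after three runs a hypothetical test could do:
    #
    #   test.stdout()     # output of run 3 (the default: the last run)
    #   test.stdout(1)    # output of run 1
    #   test.stdout(-1)   # output of run 2 (one back from the current run)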
def subdir(self, *subdirs):
"""Create new subdirectories under the temporary working
directory, one for each argument. An argument may be a list,
in which case the list elements are concatenated using the
os.path.join() method. Subdirectories multiple levels deep
must be created using a separate argument for each level:
test.subdir('sub', ['sub', 'dir'], ['sub', 'dir', 'ectory'])
Returns the number of subdirectories actually created.
"""
count = 0
for sub in subdirs:
if sub is None:
continue
if is_List(sub):
sub = apply(os.path.join, tuple(sub))
new = os.path.join(self.workdir, sub)
try:
os.mkdir(new)
except OSError:
pass
else:
count = count + 1
return count
def symlink(self, target, link):
"""Creates a symlink to the specified target.
The link name may be a list, in which case the elements are
concatenated with the os.path.join() method. The link is
assumed to be under the temporary working directory unless it
is an absolute path name. The target is *not* assumed to be
under the temporary working directory.
"""
link = self.canonicalize(link)
os.symlink(target, link)
def tempdir(self, path=None):
"""Creates a temporary directory.
A unique directory name is generated if no path name is specified.
The directory is created, and will be removed when the TestCmd
object is destroyed.
"""
if path is None:
try:
path = tempfile.mktemp(prefix=tempfile.template)
except TypeError:
path = tempfile.mktemp()
os.mkdir(path)
# Symlinks in the path will report things
# differently from os.getcwd(), so chdir there
# and back to fetch the canonical path.
cwd = os.getcwd()
try:
os.chdir(path)
path = os.getcwd()
finally:
os.chdir(cwd)
# Uppercase the drive letter since the case of drive
# letters is pretty much random on win32:
drive,rest = os.path.splitdrive(path)
if drive:
path = string.upper(drive) + rest
#
self._dirlist.append(path)
global _Cleanup
try:
_Cleanup.index(self)
except ValueError:
_Cleanup.append(self)
return path
def touch(self, path, mtime=None):
"""Updates the modification time on the specified file or
directory path name. The default is to update to the
current time if no explicit modification time is specified.
"""
path = self.canonicalize(path)
atime = os.path.getatime(path)
if mtime is None:
mtime = time.time()
os.utime(path, (atime, mtime))
def unlink(self, file):
"""Unlinks the specified file name.
The file name may be a list, in which case the elements are
concatenated with the os.path.join() method. The file is
assumed to be under the temporary working directory unless it
is an absolute path name.
"""
file = self.canonicalize(file)
os.unlink(file)
def verbose_set(self, verbose):
"""Set the verbose level.
"""
self.verbose = verbose
def where_is(self, file, path=None, pathext=None):
"""Find an executable file.
"""
if is_List(file):
file = apply(os.path.join, tuple(file))
if not os.path.isabs(file):
file = where_is(file, path, pathext)
return file
def workdir_set(self, path):
"""Creates a temporary working directory with the specified
path name. If the path is a null string (''), a unique
directory name is created.
"""
if (path != None):
if path == '':
path = None
path = self.tempdir(path)
self.workdir = path
def workpath(self, *args):
"""Returns the absolute path name to a subdirectory or file
within the current temporary working directory. Concatenates
the temporary working directory name with the specified
arguments using the os.path.join() method.
"""
return apply(os.path.join, (self.workdir,) + tuple(args))
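    # Illustrative sketch (not part of the original module; names are
    # hypothetical): workpath() simply joins its arguments onto the
    # temporary working directory:
    #
    #   test.workpath('sub', 'dir', 'file.txt')
    #   # => os.path.join(test.workdir, 'sub', 'dir', 'file.txt')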
def readable(self, top, read=1):
"""Make the specified directory tree readable (read == 1)
or not (read == None).
This method has no effect on Windows systems, which use a
completely different mechanism to control file readability.
"""
if sys.platform == 'win32':
return
if read:
def do_chmod(fname):
try: st = os.stat(fname)
except OSError: pass
else: os.chmod(fname, stat.S_IMODE(st[stat.ST_MODE]|stat.S_IREAD))
else:
def do_chmod(fname):
try: st = os.stat(fname)
except OSError: pass
else: os.chmod(fname, stat.S_IMODE(st[stat.ST_MODE]&~stat.S_IREAD))
if os.path.isfile(top):
# If it's a file, that's easy, just chmod it.
do_chmod(top)
elif read:
# It's a directory and we're trying to turn on read
# permission, so it's also pretty easy, just chmod the
# directory and then chmod every entry on our walk down the
# tree. Because os.path.walk() is top-down, we'll enable
# read permission on any directories that have it disabled
# before os.path.walk() tries to list their contents.
do_chmod(top)
def chmod_entries(arg, dirname, names, do_chmod=do_chmod):
for n in names:
do_chmod(os.path.join(dirname, n))
os.path.walk(top, chmod_entries, None)
else:
# It's a directory and we're trying to turn off read
            # permission, which means we have to chmod the directories
# in the tree bottom-up, lest disabling read permission from
# the top down get in the way of being able to get at lower
# parts of the tree. But os.path.walk() visits things top
# down, so we just use an object to collect a list of all
# of the entries in the tree, reverse the list, and then
# chmod the reversed (bottom-up) list.
col = Collector(top)
os.path.walk(top, col, None)
col.entries.reverse()
for d in col.entries: do_chmod(d)
def writable(self, top, write=1):
"""Make the specified directory tree writable (write == 1)
or not (write == None).
"""
if sys.platform == 'win32':
if write:
def do_chmod(fname):
try: os.chmod(fname, stat.S_IWRITE)
except OSError: pass
else:
def do_chmod(fname):
try: os.chmod(fname, stat.S_IREAD)
except OSError: pass
else:
if write:
def do_chmod(fname):
try: st = os.stat(fname)
except OSError: pass
else: os.chmod(fname, stat.S_IMODE(st[stat.ST_MODE]|0200))
else:
def do_chmod(fname):
try: st = os.stat(fname)
except OSError: pass
else: os.chmod(fname, stat.S_IMODE(st[stat.ST_MODE]&~0200))
if os.path.isfile(top):
do_chmod(top)
else:
col = Collector(top)
os.path.walk(top, col, None)
for d in col.entries: do_chmod(d)
def executable(self, top, execute=1):
"""Make the specified directory tree executable (execute == 1)
or not (execute == None).
This method has no effect on Windows systems, which use a
completely different mechanism to control file executability.
"""
if sys.platform == 'win32':
return
if execute:
def do_chmod(fname):
try: st = os.stat(fname)
except OSError: pass
else: os.chmod(fname, stat.S_IMODE(st[stat.ST_MODE]|stat.S_IEXEC))
else:
def do_chmod(fname):
try: st = os.stat(fname)
except OSError: pass
else: os.chmod(fname, stat.S_IMODE(st[stat.ST_MODE]&~stat.S_IEXEC))
if os.path.isfile(top):
# If it's a file, that's easy, just chmod it.
do_chmod(top)
elif execute:
# It's a directory and we're trying to turn on execute
# permission, so it's also pretty easy, just chmod the
# directory and then chmod every entry on our walk down the
# tree. Because os.path.walk() is top-down, we'll enable
# execute permission on any directories that have it disabled
# before os.path.walk() tries to list their contents.
do_chmod(top)
def chmod_entries(arg, dirname, names, do_chmod=do_chmod):
for n in names:
do_chmod(os.path.join(dirname, n))
os.path.walk(top, chmod_entries, None)
else:
# It's a directory and we're trying to turn off execute
# permission, which means we have to chmod the directories
# in the tree bottom-up, lest disabling execute permission from
# the top down get in the way of being able to get at lower
# parts of the tree. But os.path.walk() visits things top
# down, so we just use an object to collect a list of all
# of the entries in the tree, reverse the list, and then
# chmod the reversed (bottom-up) list.
col = Collector(top)
os.path.walk(top, col, None)
col.entries.reverse()
for d in col.entries: do_chmod(d)
def write(self, file, content, mode = 'wb'):
"""Writes the specified content text (second argument) to the
specified file name (first argument). The file name may be
a list, in which case the elements are concatenated with the
os.path.join() method. The file is created under the temporary
working directory. Any subdirectories in the path must already
exist. The I/O mode for the file may be specified; it must
begin with a 'w'. The default is 'wb' (binary write).
"""
file = self.canonicalize(file)
if mode[0] != 'w':
raise ValueError, "mode must begin with 'w'"
with open(file, mode) as f:
f.write(content)
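    # Illustrative sketch (not part of the original module; file names are
    # hypothetical): read() and write() are symmetric helpers rooted at the
    # temporary working directory, so a round trip looks like:
    #
    #   test.subdir('data')                              # parent must exist
    #   test.write(['data', 'input.txt'], "contents\n")
    #   text = test.read(['data', 'input.txt'], mode='r')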
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| gpl-2.0 |
ShassAro/ShassAro | DockerAdmin/dockerVirtualEnv/lib/python2.7/site-packages/pip/download.py | 328 | 22580 | import cgi
import email.utils
import hashlib
import getpass
import mimetypes
import os
import platform
import re
import shutil
import sys
import tempfile
import pip
from pip.backwardcompat import urllib, urlparse, raw_input
from pip.exceptions import InstallationError, HashMismatch
from pip.util import (splitext, rmtree, format_size, display_path,
backup_dir, ask_path_exists, unpack_file,
create_download_cache_folder, cache_download)
from pip.vcs import vcs
from pip.log import logger
from pip._vendor import requests, six
from pip._vendor.requests.adapters import BaseAdapter
from pip._vendor.requests.auth import AuthBase, HTTPBasicAuth
from pip._vendor.requests.compat import IncompleteRead
from pip._vendor.requests.exceptions import InvalidURL, ChunkedEncodingError
from pip._vendor.requests.models import Response
from pip._vendor.requests.structures import CaseInsensitiveDict
__all__ = ['get_file_content',
'is_url', 'url_to_path', 'path_to_url',
'is_archive_file', 'unpack_vcs_link',
'unpack_file_url', 'is_vcs_url', 'is_file_url', 'unpack_http_url']
def user_agent():
"""Return a string representing the user agent."""
_implementation = platform.python_implementation()
if _implementation == 'CPython':
_implementation_version = platform.python_version()
elif _implementation == 'PyPy':
_implementation_version = '%s.%s.%s' % (sys.pypy_version_info.major,
sys.pypy_version_info.minor,
sys.pypy_version_info.micro)
if sys.pypy_version_info.releaselevel != 'final':
_implementation_version = ''.join([
_implementation_version,
sys.pypy_version_info.releaselevel,
])
elif _implementation == 'Jython':
_implementation_version = platform.python_version() # Complete Guess
elif _implementation == 'IronPython':
_implementation_version = platform.python_version() # Complete Guess
else:
_implementation_version = 'Unknown'
try:
p_system = platform.system()
p_release = platform.release()
except IOError:
p_system = 'Unknown'
p_release = 'Unknown'
return " ".join(['pip/%s' % pip.__version__,
'%s/%s' % (_implementation, _implementation_version),
'%s/%s' % (p_system, p_release)])
class MultiDomainBasicAuth(AuthBase):
def __init__(self, prompting=True):
self.prompting = prompting
self.passwords = {}
def __call__(self, req):
parsed = urlparse.urlparse(req.url)
# Get the netloc without any embedded credentials
netloc = parsed.netloc.split("@", 1)[-1]
# Set the url of the request to the url without any credentials
req.url = urlparse.urlunparse(parsed[:1] + (netloc,) + parsed[2:])
# Use any stored credentials that we have for this netloc
username, password = self.passwords.get(netloc, (None, None))
# Extract credentials embedded in the url if we have none stored
if username is None:
username, password = self.parse_credentials(parsed.netloc)
if username or password:
# Store the username and password
self.passwords[netloc] = (username, password)
# Send the basic auth with this request
req = HTTPBasicAuth(username or "", password or "")(req)
# Attach a hook to handle 401 responses
req.register_hook("response", self.handle_401)
return req
def handle_401(self, resp, **kwargs):
# We only care about 401 responses, anything else we want to just
# pass through the actual response
if resp.status_code != 401:
return resp
        # We are not able to prompt the user so simply return the response
if not self.prompting:
return resp
parsed = urlparse.urlparse(resp.url)
# Prompt the user for a new username and password
username = raw_input("User for %s: " % parsed.netloc)
password = getpass.getpass("Password: ")
# Store the new username and password to use for future requests
if username or password:
self.passwords[parsed.netloc] = (username, password)
# Consume content and release the original connection to allow our new
# request to reuse the same one.
resp.content
resp.raw.release_conn()
# Add our new username and password to the request
req = HTTPBasicAuth(username or "", password or "")(resp.request)
# Send our new request
new_resp = resp.connection.send(req, **kwargs)
new_resp.history.append(resp)
return new_resp
def parse_credentials(self, netloc):
if "@" in netloc:
userinfo = netloc.rsplit("@", 1)[0]
if ":" in userinfo:
return userinfo.split(":", 1)
return userinfo, None
return None, None
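    # Illustrative sketch (not part of pip; values are hypothetical):
    # parse_credentials() splits any userinfo embedded in the netloc:
    #
    #   self.parse_credentials("user:secret@example.com")  # ['user', 'secret']
    #   self.parse_credentials("user@example.com")          # ('user', None)
    #   self.parse_credentials("example.com")               # (None, None)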
class LocalFSResponse(object):
def __init__(self, fileobj):
self.fileobj = fileobj
def __getattr__(self, name):
return getattr(self.fileobj, name)
def read(self, amt=None, decode_content=None, cache_content=False):
return self.fileobj.read(amt)
# Insert Hacks to Make Cookie Jar work w/ Requests
@property
def _original_response(self):
class FakeMessage(object):
def getheaders(self, header):
return []
def get_all(self, header, default):
return []
class FakeResponse(object):
@property
def msg(self):
return FakeMessage()
return FakeResponse()
class LocalFSAdapter(BaseAdapter):
def send(self, request, stream=None, timeout=None, verify=None, cert=None,
proxies=None):
parsed_url = urlparse.urlparse(request.url)
# We only work for requests with a host of localhost
if parsed_url.netloc.lower() != "localhost":
raise InvalidURL("Invalid URL %r: Only localhost is allowed" %
request.url)
real_url = urlparse.urlunparse(parsed_url[:1] + ("",) + parsed_url[2:])
pathname = url_to_path(real_url)
resp = Response()
resp.status_code = 200
resp.url = real_url
stats = os.stat(pathname)
modified = email.utils.formatdate(stats.st_mtime, usegmt=True)
resp.headers = CaseInsensitiveDict({
"Content-Type": mimetypes.guess_type(pathname)[0] or "text/plain",
"Content-Length": stats.st_size,
"Last-Modified": modified,
})
resp.raw = LocalFSResponse(open(pathname, "rb"))
resp.close = resp.raw.close
return resp
def close(self):
pass
class PipSession(requests.Session):
timeout = None
def __init__(self, *args, **kwargs):
super(PipSession, self).__init__(*args, **kwargs)
# Attach our User Agent to the request
self.headers["User-Agent"] = user_agent()
# Attach our Authentication handler to the session
self.auth = MultiDomainBasicAuth()
# Enable file:// urls
self.mount("file://", LocalFSAdapter())
def request(self, method, url, *args, **kwargs):
# Make file:// urls not fail due to lack of a hostname
parsed = urlparse.urlparse(url)
if parsed.scheme == "file":
url = urlparse.urlunparse(parsed[:1] + ("localhost",) + parsed[2:])
# Allow setting a default timeout on a session
kwargs.setdefault("timeout", self.timeout)
# Dispatch the actual request
return super(PipSession, self).request(method, url, *args, **kwargs)
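# Illustrative sketch (not part of pip; the path is hypothetical): PipSession
# wires the pieces above together, so a caller could simply do:
#
#   session = PipSession()
#   session.timeout = 15                        # session-wide default timeout
#   resp = session.get("file:///tmp/somefile")  # served by LocalFSAdapter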
def get_file_content(url, comes_from=None, session=None):
"""Gets the content of a file; it may be a filename, file: URL, or
http: URL. Returns (location, content). Content is unicode."""
if session is None:
session = PipSession()
match = _scheme_re.search(url)
if match:
scheme = match.group(1).lower()
if (scheme == 'file' and comes_from
and comes_from.startswith('http')):
raise InstallationError(
'Requirements file %s references URL %s, which is local'
% (comes_from, url))
if scheme == 'file':
path = url.split(':', 1)[1]
path = path.replace('\\', '/')
match = _url_slash_drive_re.match(path)
if match:
path = match.group(1) + ':' + path.split('|', 1)[1]
path = urllib.unquote(path)
if path.startswith('/'):
path = '/' + path.lstrip('/')
url = path
else:
## FIXME: catch some errors
resp = session.get(url)
resp.raise_for_status()
if six.PY3:
return resp.url, resp.text
else:
return resp.url, resp.content
try:
f = open(url)
content = f.read()
except IOError:
e = sys.exc_info()[1]
raise InstallationError('Could not open requirements file: %s' % str(e))
else:
f.close()
return url, content
_scheme_re = re.compile(r'^(http|https|file):', re.I)
_url_slash_drive_re = re.compile(r'/*([a-z])\|', re.I)
def is_url(name):
"""Returns true if the name looks like a URL"""
if ':' not in name:
return False
scheme = name.split(':', 1)[0].lower()
return scheme in ['http', 'https', 'file', 'ftp'] + vcs.all_schemes
def url_to_path(url):
"""
Convert a file: URL to a path.
"""
assert url.startswith('file:'), (
"You can only turn file: urls into filenames (not %r)" % url)
path = url[len('file:'):].lstrip('/')
path = urllib.unquote(path)
if _url_drive_re.match(path):
path = path[0] + ':' + path[2:]
else:
path = '/' + path
return path
_drive_re = re.compile('^([a-z]):', re.I)
_url_drive_re = re.compile('^([a-z])[:|]', re.I)
def path_to_url(path):
"""
Convert a path to a file: URL. The path will be made absolute and have
quoted path parts.
"""
path = os.path.normpath(os.path.abspath(path))
drive, path = os.path.splitdrive(path)
filepath = path.split(os.path.sep)
url = '/'.join([urllib.quote(part) for part in filepath])
if not drive:
url = url.lstrip('/')
return 'file:///' + drive + url
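# Illustrative sketch (not part of pip; the path is hypothetical):
# path_to_url() and url_to_path() are intended to be inverses, e.g. on a
# POSIX system
#
#   path_to_url('/tmp/some dir/pkg.tar.gz')
#   # => 'file:///tmp/some%20dir/pkg.tar.gz'
#   url_to_path('file:///tmp/some%20dir/pkg.tar.gz')
#   # => '/tmp/some dir/pkg.tar.gz'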
def is_archive_file(name):
"""Return True if `name` is a considered as an archive file."""
archives = ('.zip', '.tar.gz', '.tar.bz2', '.tgz', '.tar', '.pybundle',
'.whl')
ext = splitext(name)[1].lower()
if ext in archives:
return True
return False
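# Illustrative sketch (not part of pip; names are hypothetical):
# is_archive_file() only inspects the extension, e.g.
#
#   is_archive_file('requests-2.0.tar.gz')   # True
#   is_archive_file('requests')              # False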
def unpack_vcs_link(link, location, only_download=False):
vcs_backend = _get_used_vcs_backend(link)
if only_download:
vcs_backend.export(location)
else:
vcs_backend.unpack(location)
def _get_used_vcs_backend(link):
for backend in vcs.backends:
if link.scheme in backend.schemes:
vcs_backend = backend(link.url)
return vcs_backend
def is_vcs_url(link):
return bool(_get_used_vcs_backend(link))
def is_file_url(link):
return link.url.lower().startswith('file:')
def _check_hash(download_hash, link):
if download_hash.digest_size != hashlib.new(link.hash_name).digest_size:
logger.fatal("Hash digest size of the package %d (%s) doesn't match the expected hash name %s!"
% (download_hash.digest_size, link, link.hash_name))
raise HashMismatch('Hash name mismatch for package %s' % link)
if download_hash.hexdigest() != link.hash:
logger.fatal("Hash of the package %s (%s) doesn't match the expected hash %s!"
% (link, download_hash.hexdigest(), link.hash))
raise HashMismatch('Bad %s hash for package %s' % (link.hash_name, link))
def _get_hash_from_file(target_file, link):
try:
download_hash = hashlib.new(link.hash_name)
except (ValueError, TypeError):
logger.warn("Unsupported hash name %s for package %s" % (link.hash_name, link))
return None
fp = open(target_file, 'rb')
while True:
chunk = fp.read(4096)
if not chunk:
break
download_hash.update(chunk)
fp.close()
return download_hash
def _download_url(resp, link, temp_location):
fp = open(temp_location, 'wb')
download_hash = None
if link.hash and link.hash_name:
try:
download_hash = hashlib.new(link.hash_name)
except ValueError:
logger.warn("Unsupported hash name %s for package %s" % (link.hash_name, link))
try:
total_length = int(resp.headers['content-length'])
except (ValueError, KeyError, TypeError):
total_length = 0
downloaded = 0
show_progress = total_length > 40 * 1000 or not total_length
show_url = link.show_url
try:
if show_progress:
## FIXME: the URL can get really long in this message:
if total_length:
logger.start_progress('Downloading %s (%s): ' % (show_url, format_size(total_length)))
else:
logger.start_progress('Downloading %s (unknown size): ' % show_url)
else:
logger.notify('Downloading %s' % show_url)
logger.info('Downloading from URL %s' % link)
def resp_read(chunk_size):
try:
# Special case for urllib3.
try:
for chunk in resp.raw.stream(
chunk_size, decode_content=False):
yield chunk
except IncompleteRead as e:
raise ChunkedEncodingError(e)
except AttributeError:
# Standard file-like object.
while True:
chunk = resp.raw.read(chunk_size)
if not chunk:
break
yield chunk
for chunk in resp_read(4096):
downloaded += len(chunk)
if show_progress:
if not total_length:
logger.show_progress('%s' % format_size(downloaded))
else:
logger.show_progress('%3i%% %s' % (100 * downloaded / total_length, format_size(downloaded)))
if download_hash is not None:
download_hash.update(chunk)
fp.write(chunk)
fp.close()
finally:
if show_progress:
logger.end_progress('%s downloaded' % format_size(downloaded))
return download_hash
def _copy_file(filename, location, content_type, link):
copy = True
download_location = os.path.join(location, link.filename)
if os.path.exists(download_location):
response = ask_path_exists(
'The file %s exists. (i)gnore, (w)ipe, (b)ackup ' %
display_path(download_location), ('i', 'w', 'b'))
if response == 'i':
copy = False
elif response == 'w':
logger.warn('Deleting %s' % display_path(download_location))
os.remove(download_location)
elif response == 'b':
dest_file = backup_dir(download_location)
logger.warn('Backing up %s to %s'
% (display_path(download_location), display_path(dest_file)))
shutil.move(download_location, dest_file)
if copy:
shutil.copy(filename, download_location)
logger.notify('Saved %s' % display_path(download_location))
def unpack_http_url(link, location, download_cache, download_dir=None,
session=None):
if session is None:
session = PipSession()
temp_dir = tempfile.mkdtemp('-unpack', 'pip-')
temp_location = None
target_url = link.url.split('#', 1)[0]
already_cached = False
cache_file = None
cache_content_type_file = None
download_hash = None
# If a download cache is specified, is the file cached there?
if download_cache:
cache_file = os.path.join(download_cache,
urllib.quote(target_url, ''))
cache_content_type_file = cache_file + '.content-type'
already_cached = (
os.path.exists(cache_file) and
os.path.exists(cache_content_type_file)
)
if not os.path.isdir(download_cache):
create_download_cache_folder(download_cache)
# If a download dir is specified, is the file already downloaded there?
already_downloaded = None
if download_dir:
already_downloaded = os.path.join(download_dir, link.filename)
if not os.path.exists(already_downloaded):
already_downloaded = None
    # If already downloaded, does its hash match?
if already_downloaded:
temp_location = already_downloaded
content_type = mimetypes.guess_type(already_downloaded)[0]
logger.notify('File was already downloaded %s' % already_downloaded)
if link.hash:
download_hash = _get_hash_from_file(temp_location, link)
try:
_check_hash(download_hash, link)
except HashMismatch:
logger.warn(
'Previously-downloaded file %s has bad hash, '
're-downloading.' % temp_location
)
temp_location = None
os.unlink(already_downloaded)
already_downloaded = None
# If not a valid download, let's confirm the cached file is valid
if already_cached and not temp_location:
with open(cache_content_type_file) as fp:
content_type = fp.read().strip()
temp_location = cache_file
logger.notify('Using download cache from %s' % cache_file)
if link.hash and link.hash_name:
download_hash = _get_hash_from_file(cache_file, link)
try:
_check_hash(download_hash, link)
except HashMismatch:
logger.warn(
'Cached file %s has bad hash, '
're-downloading.' % temp_location
)
temp_location = None
os.unlink(cache_file)
os.unlink(cache_content_type_file)
already_cached = False
# We don't have either a cached or a downloaded copy
# let's download to a tmp dir
if not temp_location:
try:
resp = session.get(target_url, stream=True)
resp.raise_for_status()
except requests.HTTPError as exc:
logger.fatal("HTTP error %s while getting %s" %
(exc.response.status_code, link))
raise
content_type = resp.headers.get('content-type', '')
filename = link.filename # fallback
# Have a look at the Content-Disposition header for a better guess
content_disposition = resp.headers.get('content-disposition')
if content_disposition:
type, params = cgi.parse_header(content_disposition)
# We use ``or`` here because we don't want to use an "empty" value
# from the filename param.
filename = params.get('filename') or filename
ext = splitext(filename)[1]
if not ext:
ext = mimetypes.guess_extension(content_type)
if ext:
filename += ext
if not ext and link.url != resp.url:
ext = os.path.splitext(resp.url)[1]
if ext:
filename += ext
temp_location = os.path.join(temp_dir, filename)
download_hash = _download_url(resp, link, temp_location)
if link.hash and link.hash_name:
_check_hash(download_hash, link)
# a download dir is specified; let's copy the archive there
if download_dir and not already_downloaded:
_copy_file(temp_location, download_dir, content_type, link)
# unpack the archive to the build dir location. even when only downloading
# archives, they have to be unpacked to parse dependencies
unpack_file(temp_location, location, content_type, link)
# if using a download cache, cache it, if needed
if cache_file and not already_cached:
cache_download(cache_file, temp_location, content_type)
if not (already_cached or already_downloaded):
os.unlink(temp_location)
os.rmdir(temp_dir)
def unpack_file_url(link, location, download_dir=None):
link_path = url_to_path(link.url_without_fragment)
already_downloaded = False
# If it's a url to a local directory
if os.path.isdir(link_path):
if os.path.isdir(location):
rmtree(location)
shutil.copytree(link_path, location, symlinks=True)
return
# if link has a hash, let's confirm it matches
if link.hash:
link_path_hash = _get_hash_from_file(link_path, link)
_check_hash(link_path_hash, link)
# If a download dir is specified, is the file already there and valid?
if download_dir:
download_path = os.path.join(download_dir, link.filename)
if os.path.exists(download_path):
content_type = mimetypes.guess_type(download_path)[0]
logger.notify('File was already downloaded %s' % download_path)
if link.hash:
download_hash = _get_hash_from_file(download_path, link)
try:
_check_hash(download_hash, link)
already_downloaded = True
except HashMismatch:
logger.warn(
'Previously-downloaded file %s has bad hash, '
're-downloading.' % link_path
)
os.unlink(download_path)
else:
already_downloaded = True
if already_downloaded:
from_path = download_path
else:
from_path = link_path
content_type = mimetypes.guess_type(from_path)[0]
# unpack the archive to the build dir location. even when only downloading
# archives, they have to be unpacked to parse dependencies
unpack_file(from_path, location, content_type, link)
# a download dir is specified and not already downloaded
if download_dir and not already_downloaded:
_copy_file(from_path, download_dir, content_type, link)
| gpl-2.0 |
harshita-gupta/Harvard-FRSEM-Catalog-2016-17 | flask/lib/python2.7/site-packages/wheel/test/test_basic.py | 472 | 6405 | """
Basic wheel tests.
"""
import os
import pkg_resources
import json
import sys
from pkg_resources import resource_filename
import wheel.util
import wheel.tool
from wheel import egg2wheel
from wheel.install import WheelFile
from zipfile import ZipFile
from shutil import rmtree
test_distributions = ("complex-dist", "simple.dist", "headers.dist")
def teardown_module():
"""Delete eggs/wheels created by tests."""
base = pkg_resources.resource_filename('wheel.test', '')
for dist in test_distributions:
for subdir in ('build', 'dist'):
try:
rmtree(os.path.join(base, dist, subdir))
except OSError:
pass
def setup_module():
build_wheel()
build_egg()
def build_wheel():
"""Build wheels from test distributions."""
for dist in test_distributions:
pwd = os.path.abspath(os.curdir)
distdir = pkg_resources.resource_filename('wheel.test', dist)
os.chdir(distdir)
try:
sys.argv = ['', 'bdist_wheel']
exec(compile(open('setup.py').read(), 'setup.py', 'exec'))
finally:
os.chdir(pwd)
def build_egg():
"""Build eggs from test distributions."""
for dist in test_distributions:
pwd = os.path.abspath(os.curdir)
distdir = pkg_resources.resource_filename('wheel.test', dist)
os.chdir(distdir)
try:
sys.argv = ['', 'bdist_egg']
exec(compile(open('setup.py').read(), 'setup.py', 'exec'))
finally:
os.chdir(pwd)
def test_findable():
"""Make sure pkg_resources can find us."""
assert pkg_resources.working_set.by_key['wheel'].version
def test_egg_re():
"""Make sure egg_info_re matches."""
egg_names = open(pkg_resources.resource_filename('wheel', 'eggnames.txt'))
for line in egg_names:
line = line.strip()
if not line:
continue
assert egg2wheel.egg_info_re.match(line), line
def test_compatibility_tags():
"""Test compatibilty tags are working."""
wf = WheelFile("package-1.0.0-cp32.cp33-noabi-noarch.whl")
assert (list(wf.compatibility_tags) ==
[('cp32', 'noabi', 'noarch'), ('cp33', 'noabi', 'noarch')])
assert (wf.arity == 2)
wf2 = WheelFile("package-1.0.0-1st-cp33-noabi-noarch.whl")
wf2_info = wf2.parsed_filename.groupdict()
assert wf2_info['build'] == '1st', wf2_info
def test_convert_egg():
base = pkg_resources.resource_filename('wheel.test', '')
for dist in test_distributions:
distdir = os.path.join(base, dist, 'dist')
eggs = [e for e in os.listdir(distdir) if e.endswith('.egg')]
wheel.tool.convert(eggs, distdir, verbose=False)
def test_unpack():
"""
Make sure 'wheel unpack' works.
This also verifies the integrity of our testing wheel files.
"""
for dist in test_distributions:
distdir = pkg_resources.resource_filename('wheel.test',
os.path.join(dist, 'dist'))
for wheelfile in (w for w in os.listdir(distdir) if w.endswith('.whl')):
wheel.tool.unpack(os.path.join(distdir, wheelfile), distdir)
def test_no_scripts():
"""Make sure entry point scripts are not generated."""
dist = "complex-dist"
basedir = pkg_resources.resource_filename('wheel.test', dist)
for (dirname, subdirs, filenames) in os.walk(basedir):
for filename in filenames:
if filename.endswith('.whl'):
whl = ZipFile(os.path.join(dirname, filename))
for entry in whl.infolist():
assert not '.data/scripts/' in entry.filename
def test_pydist():
"""Make sure pydist.json exists and validates against our schema."""
# XXX this test may need manual cleanup of older wheels
import jsonschema
def open_json(filename):
return json.loads(open(filename, 'rb').read().decode('utf-8'))
pymeta_schema = open_json(resource_filename('wheel.test',
'pydist-schema.json'))
valid = 0
for dist in ("simple.dist", "complex-dist"):
basedir = pkg_resources.resource_filename('wheel.test', dist)
for (dirname, subdirs, filenames) in os.walk(basedir):
for filename in filenames:
if filename.endswith('.whl'):
whl = ZipFile(os.path.join(dirname, filename))
for entry in whl.infolist():
if entry.filename.endswith('/metadata.json'):
pymeta = json.loads(whl.read(entry).decode('utf-8'))
jsonschema.validate(pymeta, pymeta_schema)
valid += 1
assert valid > 0, "No metadata.json found"
def test_util():
"""Test functions in util.py."""
for i in range(10):
before = b'*' * i
encoded = wheel.util.urlsafe_b64encode(before)
assert not encoded.endswith(b'=')
after = wheel.util.urlsafe_b64decode(encoded)
assert before == after
def test_pick_best():
"""Test the wheel ranking algorithm."""
def get_tags(res):
info = res[-1].parsed_filename.groupdict()
return info['pyver'], info['abi'], info['plat']
cand_tags = [('py27', 'noabi', 'noarch'), ('py26', 'noabi', 'noarch'),
('cp27', 'noabi', 'linux_i686'),
('cp26', 'noabi', 'linux_i686'),
('cp27', 'noabi', 'linux_x86_64'),
('cp26', 'noabi', 'linux_x86_64')]
cand_wheels = [WheelFile('testpkg-1.0-%s-%s-%s.whl' % t)
for t in cand_tags]
supported = [('cp27', 'noabi', 'linux_i686'), ('py27', 'noabi', 'noarch')]
supported2 = [('cp27', 'noabi', 'linux_i686'), ('py27', 'noabi', 'noarch'),
('cp26', 'noabi', 'linux_i686'), ('py26', 'noabi', 'noarch')]
supported3 = [('cp26', 'noabi', 'linux_i686'), ('py26', 'noabi', 'noarch'),
('cp27', 'noabi', 'linux_i686'), ('py27', 'noabi', 'noarch')]
for supp in (supported, supported2, supported3):
context = lambda: list(supp)
for wheel in cand_wheels:
wheel.context = context
best = max(cand_wheels)
assert list(best.tags)[0] == supp[0]
# assert_equal(
# list(map(get_tags, pick_best(cand_wheels, supp, top=False))), supp)
| mit |
NaturalGIS/naturalgis_qgis | tests/src/python/test_qgsimagecache.py | 41 | 5431 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsImageCache.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = '(C) 2018 by Nyall Dawson'
__date__ = '02/10/2018'
__copyright__ = 'Copyright 2018, The QGIS Project'
import qgis # NOQA
import os
import socketserver
import threading
import http.server
import time
from qgis.PyQt.QtCore import QDir, QCoreApplication, QSize
from qgis.PyQt.QtGui import QColor, QImage, QPainter
from qgis.core import (QgsImageCache, QgsRenderChecker, QgsApplication, QgsMultiRenderChecker)
from qgis.testing import start_app, unittest
from utilities import unitTestDataPath
start_app()
TEST_DATA_DIR = unitTestDataPath()
class SlowHTTPRequestHandler(http.server.SimpleHTTPRequestHandler):
def do_GET(self):
time.sleep(1)
return http.server.SimpleHTTPRequestHandler.do_GET(self)
class TestQgsImageCache(unittest.TestCase):
@classmethod
def setUpClass(cls):
        # Bring up a simple HTTP server, for remote image tests
os.chdir(unitTestDataPath() + '')
handler = SlowHTTPRequestHandler
cls.httpd = socketserver.TCPServer(('localhost', 0), handler)
cls.port = cls.httpd.server_address[1]
cls.httpd_thread = threading.Thread(target=cls.httpd.serve_forever)
cls.httpd_thread.setDaemon(True)
cls.httpd_thread.start()
def setUp(self):
self.report = "<h1>Python QgsImageCache Tests</h1>\n"
self.fetched = False
QgsApplication.imageCache().remoteImageFetched.connect(self.imageFetched)
def tearDown(self):
report_file_path = "%s/qgistest.html" % QDir.tempPath()
with open(report_file_path, 'a') as report_file:
report_file.write(self.report)
def imageFetched(self):
self.fetched = True
def waitForFetch(self):
self.fetched = False
while not self.fetched:
QCoreApplication.processEvents()
def testRemoteImage(self):
"""Test fetching remote image."""
url = 'http://localhost:{}/qgis_local_server/sample_image.png'.format(str(TestQgsImageCache.port))
image, in_cache = QgsApplication.imageCache().pathAsImage(url, QSize(100, 100), 1.0, True)
# first should be waiting image
self.assertTrue(self.imageCheck('Remote Image', 'waiting_image', image))
self.assertFalse(QgsApplication.imageCache().originalSize(url).isValid())
self.waitForFetch()
# second should be correct image
image, in_cache = QgsApplication.imageCache().pathAsImage(url, QSize(100, 100), 1.0, True)
self.assertTrue(self.imageCheck('Remote Image', 'remote_image', image))
self.assertEqual(QgsApplication.imageCache().originalSize(url), QSize(511, 800), 1.0)
def testRemoteImageMissing(self):
"""Test fetching remote image with bad url"""
url = 'http://localhost:{}/qgis_local_server/xxx.png'.format(str(TestQgsImageCache.port)) # oooo naughty
image, in_cache = QgsApplication.imageCache().pathAsImage(url, QSize(100, 100), 1.0, True)
self.assertTrue(self.imageCheck('Remote image missing', 'waiting_image', image))
def testRemoteImageBlocking(self):
"""Test fetching remote image."""
# remote not yet requested so not in cache
url = 'http://localhost:{}/qgis_local_server/logo_2017.png'.format(str(TestQgsImageCache.port))
image, in_cache = QgsApplication.imageCache().pathAsImage(url, QSize(100, 100), 1.0, True, blocking=1)
# first should be correct image
self.assertTrue(self.imageCheck('Remote image sync', 'remote_image_blocking', image))
# remote probably in cache
url = 'http://localhost:{}/qgis_local_server/sample_image.png'.format(str(TestQgsImageCache.port))
image, in_cache = QgsApplication.imageCache().pathAsImage(url, QSize(100, 100), 1.0, True, blocking=1)
self.assertTrue(self.imageCheck('Remote Image', 'remote_image', image))
# remote probably in cache
url = 'http://localhost:{}/qgis_local_server/xxx.png'.format(str(TestQgsImageCache.port)) # oooo naughty
image, in_cache = QgsApplication.imageCache().pathAsImage(url, QSize(100, 100), 1.0, True, blocking=1)
self.assertTrue(self.imageCheck('Remote image missing', 'waiting_image', image))
def imageCheck(self, name, reference_image, image):
self.report += "<h2>Render {}</h2>\n".format(name)
temp_dir = QDir.tempPath() + '/'
file_name = temp_dir + 'image_' + name + ".png"
output_image = QImage(image.size(), QImage.Format_RGB32)
QgsMultiRenderChecker.drawBackground(output_image)
painter = QPainter(output_image)
painter.drawImage(0, 0, image)
painter.end()
output_image.save(file_name, "PNG")
checker = QgsRenderChecker()
checker.setControlPathPrefix("image_cache")
checker.setControlName("expected_" + reference_image)
checker.setRenderedImage(file_name)
checker.setColorTolerance(2)
result = checker.compareImages(name, 20)
self.report += checker.report()
print((self.report))
return result
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
JerryLX/DPDK | tools/dpdk-devbind.py | 2 | 20921 | #! /usr/bin/python
#
# BSD LICENSE
#
# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import sys
import os
import getopt
import subprocess
from os.path import exists, abspath, dirname, basename
# The PCI base class for NETWORK devices
NETWORK_BASE_CLASS = "02"
# global dict ethernet devices present. Dictionary indexed by PCI address.
# Each device within this is itself a dictionary of device properties
devices = {}
# list of supported DPDK drivers
dpdk_drivers = ["igb_uio", "vfio-pci", "uio_pci_generic"]
# command-line arg flags
b_flag = None
status_flag = False
force_flag = False
args = []
def usage():
'''Print usage information for the program'''
argv0 = basename(sys.argv[0])
print("""
Usage:
------
%(argv0)s [options] DEVICE1 DEVICE2 ....
where DEVICE1, DEVICE2 etc, are specified via PCI "domain:bus:slot.func" syntax
or "bus:slot.func" syntax. For devices bound to Linux kernel drivers, they may
also be referred to by Linux interface name e.g. eth0, eth1, em0, em1, etc.
Options:
--help, --usage:
Display usage information and quit
-s, --status:
Print the current status of all known network interfaces.
For each device, it displays the PCI domain, bus, slot and function,
along with a text description of the device. Depending upon whether the
device is being used by a kernel driver, the igb_uio driver, or no
driver, other relevant information will be displayed:
* the Linux interface name e.g. if=eth0
* the driver being used e.g. drv=igb_uio
* any suitable drivers not currently using that device
e.g. unused=igb_uio
NOTE: if this flag is passed along with a bind/unbind option, the
status display will always occur after the other operations have taken
place.
-b driver, --bind=driver:
Select the driver to use or \"none\" to unbind the device
-u, --unbind:
Unbind a device (Equivalent to \"-b none\")
--force:
By default, devices which are used by Linux - as indicated by having
routes in the routing table - cannot be modified. Using the --force
flag overrides this behavior, allowing active links to be forcibly
unbound.
WARNING: This can lead to loss of network connection and should be used
with caution.
Examples:
---------
To display current device status:
%(argv0)s --status
To bind eth1 from the current driver and move to use igb_uio
%(argv0)s --bind=igb_uio eth1
To unbind 0000:01:00.0 from using any driver
%(argv0)s -u 0000:01:00.0
To bind 0000:02:00.0 and 0000:02:00.1 to the ixgbe kernel driver
%(argv0)s -b ixgbe 02:00.0 02:00.1
""" % locals()) # replace items from local variables
# This is roughly compatible with check_output function in subprocess module
# which is only available in python 2.7.
def check_output(args, stderr=None):
'''Run a command and capture its output'''
return subprocess.Popen(args, stdout=subprocess.PIPE,
stderr=stderr).communicate()[0]
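# Illustrative sketch (not part of the original script): check_output() is a
# minimal stand-in for subprocess.check_output(), e.g.
#
#   out = check_output(["lspci", "-Dvmmn"])   # raw bytes of the lspci output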
def find_module(mod):
'''find the .ko file for kernel module named mod.
Searches the $RTE_SDK/$RTE_TARGET directory, the kernel
modules directory and finally under the parent directory of
the script '''
# check $RTE_SDK/$RTE_TARGET directory
if 'RTE_SDK' in os.environ and 'RTE_TARGET' in os.environ:
path = "%s/%s/kmod/%s.ko" % (os.environ['RTE_SDK'],
os.environ['RTE_TARGET'], mod)
if exists(path):
return path
# check using depmod
try:
depmod_out = check_output(["modinfo", "-n", mod],
stderr=subprocess.STDOUT).lower()
if "error" not in depmod_out:
path = depmod_out.strip()
if exists(path):
return path
except: # if modinfo can't find module, it fails, so continue
pass
# check for a copy based off current path
tools_dir = dirname(abspath(sys.argv[0]))
if (tools_dir.endswith("tools")):
base_dir = dirname(tools_dir)
find_out = check_output(["find", base_dir, "-name", mod + ".ko"])
if len(find_out) > 0: # something matched
path = find_out.splitlines()[0]
if exists(path):
return path
def check_modules():
'''Checks that igb_uio is loaded'''
global dpdk_drivers
# list of supported modules
mods = [{"Name": driver, "Found": False} for driver in dpdk_drivers]
# first check if module is loaded
try:
# Get list of sysfs modules (both built-in and dynamically loaded)
sysfs_path = '/sys/module/'
# Get the list of directories in sysfs_path
sysfs_mods = [os.path.join(sysfs_path, o) for o
in os.listdir(sysfs_path)
if os.path.isdir(os.path.join(sysfs_path, o))]
# Extract the last element of '/sys/module/abc' in the array
sysfs_mods = [a.split('/')[-1] for a in sysfs_mods]
# special case for vfio_pci (module is named vfio-pci,
# but its .ko is named vfio_pci)
        # use a real list (not a lazy map object) so the repeated membership
        # tests below keep working on Python 3
        sysfs_mods = [a if a != 'vfio_pci' else 'vfio-pci'
                      for a in sysfs_mods]
for mod in mods:
if mod["Name"] in sysfs_mods:
mod["Found"] = True
except:
pass
# check if we have at least one loaded module
if True not in [mod["Found"] for mod in mods] and b_flag is not None:
if b_flag in dpdk_drivers:
print("Error - no supported modules(DPDK driver) are loaded")
sys.exit(1)
else:
print("Warning - no supported modules(DPDK driver) are loaded")
# change DPDK driver list to only contain drivers that are loaded
dpdk_drivers = [mod["Name"] for mod in mods if mod["Found"]]
def has_driver(dev_id):
'''return true if a device is assigned to a driver. False otherwise'''
return "Driver_str" in devices[dev_id]
def get_pci_device_details(dev_id):
'''This function gets additional details for a PCI device'''
device = {}
extra_info = check_output(["lspci", "-vmmks", dev_id]).splitlines()
# parse lspci details
for line in extra_info:
if len(line) == 0:
continue
name, value = line.decode().split("\t", 1)
name = name.strip(":") + "_str"
device[name] = value
# check for a unix interface name
device["Interface"] = ""
for base, dirs, _ in os.walk("/sys/bus/pci/devices/%s/" % dev_id):
if "net" in dirs:
device["Interface"] = \
",".join(os.listdir(os.path.join(base, "net")))
break
# check if a port is used for ssh connection
device["Ssh_if"] = False
device["Active"] = ""
return device
def get_nic_details():
'''This function populates the "devices" dictionary. The keys used are
the pci addresses (domain:bus:slot.func). The values are themselves
dictionaries - one for each NIC.'''
global devices
global dpdk_drivers
# clear any old data
devices = {}
# first loop through and read details for all devices
# request machine readable format, with numeric IDs
dev = {}
dev_lines = check_output(["lspci", "-Dvmmn"]).splitlines()
for dev_line in dev_lines:
if (len(dev_line) == 0):
if dev["Class"][0:2] == NETWORK_BASE_CLASS:
# convert device and vendor ids to numbers, then add to global
dev["Vendor"] = int(dev["Vendor"], 16)
dev["Device"] = int(dev["Device"], 16)
# use dict to make copy of dev
devices[dev["Slot"]] = dict(dev)
else:
name, value = dev_line.decode().split("\t", 1)
dev[name.rstrip(":")] = value
    # check which interface, if any, carries an ssh connection to this
    # host, so we can mark it later.
ssh_if = []
route = check_output(["ip", "-o", "route"])
# filter out all lines for 169.254 routes
route = "\n".join(filter(lambda ln: not ln.startswith("169.254"),
route.decode().splitlines()))
rt_info = route.split()
for i in range(len(rt_info) - 1):
if rt_info[i] == "dev":
ssh_if.append(rt_info[i+1])
# based on the basic info, get extended text details
for d in devices.keys():
# get additional info and add it to existing data
devices[d] = devices[d].copy()
devices[d].update(get_pci_device_details(d).items())
for _if in ssh_if:
if _if in devices[d]["Interface"].split(","):
devices[d]["Ssh_if"] = True
devices[d]["Active"] = "*Active*"
break
# add igb_uio to list of supporting modules if needed
if "Module_str" in devices[d]:
for driver in dpdk_drivers:
if driver not in devices[d]["Module_str"]:
devices[d]["Module_str"] = \
devices[d]["Module_str"] + ",%s" % driver
else:
devices[d]["Module_str"] = ",".join(dpdk_drivers)
# make sure the driver and module strings do not have any duplicates
if has_driver(d):
modules = devices[d]["Module_str"].split(",")
if devices[d]["Driver_str"] in modules:
modules.remove(devices[d]["Driver_str"])
devices[d]["Module_str"] = ",".join(modules)
def dev_id_from_dev_name(dev_name):
'''Take a device "name" - a string passed in by user to identify a NIC
device, and determine the device id - i.e. the domain:bus:slot.func - for
it, which can then be used to index into the devices array'''
# check if it's already a suitable index
if dev_name in devices:
return dev_name
# check if it's an index just missing the domain part
elif "0000:" + dev_name in devices:
return "0000:" + dev_name
else:
# check if it's an interface name, e.g. eth1
for d in devices.keys():
if dev_name in devices[d]["Interface"].split(","):
return devices[d]["Slot"]
# if nothing else matches - error
print("Unknown device: %s. "
"Please specify device in \"bus:slot.func\" format" % dev_name)
sys.exit(1)
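# Illustrative sketch (not part of the original script; devices are
# hypothetical): dev_id_from_dev_name() accepts any user-facing spelling and
# returns the canonical PCI address:
#
#   dev_id_from_dev_name("0000:02:00.0")   # already canonical
#   dev_id_from_dev_name("02:00.0")        # domain added -> "0000:02:00.0"
#   dev_id_from_dev_name("eth1")           # looked up via its interface name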
def unbind_one(dev_id, force):
'''Unbind the device identified by "dev_id" from its current driver'''
dev = devices[dev_id]
if not has_driver(dev_id):
print("%s %s %s is not currently managed by any driver\n" %
(dev["Slot"], dev["Device_str"], dev["Interface"]))
return
# prevent us disconnecting ourselves
if dev["Ssh_if"] and not force:
print("Routing table indicates that interface %s is active. "
"Skipping unbind" % (dev_id))
return
# write to /sys to unbind
filename = "/sys/bus/pci/drivers/%s/unbind" % dev["Driver_str"]
try:
f = open(filename, "a")
except:
print("Error: unbind failed for %s - Cannot open %s"
% (dev_id, filename))
sys.exit(1)
f.write(dev_id)
f.close()
def bind_one(dev_id, driver, force):
'''Bind the device given by "dev_id" to the driver "driver". If the device
is already bound to a different driver, it will be unbound first'''
dev = devices[dev_id]
saved_driver = None # used to rollback any unbind in case of failure
# prevent disconnection of our ssh session
if dev["Ssh_if"] and not force:
print("Routing table indicates that interface %s is active. "
"Not modifying" % (dev_id))
return
# unbind any existing drivers we don't want
if has_driver(dev_id):
if dev["Driver_str"] == driver:
print("%s already bound to driver %s, skipping\n"
% (dev_id, driver))
return
else:
saved_driver = dev["Driver_str"]
unbind_one(dev_id, force)
dev["Driver_str"] = "" # clear driver string
# if we are binding to one of DPDK drivers, add PCI id's to that driver
if driver in dpdk_drivers:
filename = "/sys/bus/pci/drivers/%s/new_id" % driver
try:
f = open(filename, "w")
except:
print("Error: bind failed for %s - Cannot open %s"
% (dev_id, filename))
return
try:
f.write("%04x %04x" % (dev["Vendor"], dev["Device"]))
f.close()
except:
print("Error: bind failed for %s - Cannot write new PCI ID to "
"driver %s" % (dev_id, driver))
return
# do the bind by writing to /sys
filename = "/sys/bus/pci/drivers/%s/bind" % driver
try:
f = open(filename, "a")
except:
print("Error: bind failed for %s - Cannot open %s"
% (dev_id, filename))
if saved_driver is not None: # restore any previous driver
bind_one(dev_id, saved_driver, force)
return
try:
f.write(dev_id)
f.close()
except:
# for some reason, closing dev_id after adding a new PCI ID to new_id
# results in IOError. however, if the device was successfully bound,
# we don't care for any errors and can safely ignore IOError
tmp = get_pci_device_details(dev_id)
if "Driver_str" in tmp and tmp["Driver_str"] == driver:
return
print("Error: bind failed for %s - Cannot bind to driver %s"
% (dev_id, driver))
if saved_driver is not None: # restore any previous driver
bind_one(dev_id, saved_driver, force)
return
def unbind_all(dev_list, force=False):
"""Unbind method, takes a list of device locations"""
dev_list = map(dev_id_from_dev_name, dev_list)
for d in dev_list:
unbind_one(d, force)
def bind_all(dev_list, driver, force=False):
"""Bind method, takes a list of device locations"""
global devices
dev_list = map(dev_id_from_dev_name, dev_list)
for d in dev_list:
bind_one(d, driver, force)
# when binding devices to a generic driver (i.e. one that doesn't have a
# PCI ID table), some devices that are not bound to any other driver could
# be bound even if no one has asked them to. hence, we check the list of
# drivers again, and see if some of the previously-unbound devices were
# erroneously bound.
for d in devices.keys():
# skip devices that were already bound or that we know should be bound
if "Driver_str" in devices[d] or d in dev_list:
continue
# update information about this device
        devices[d] = devices[d].copy()
        devices[d].update(get_pci_device_details(d).items())
# check if updated information indicates that the device was bound
if "Driver_str" in devices[d]:
unbind_one(d, force)
def display_devices(title, dev_list, extra_params=None):
'''Displays to the user the details of a list of devices given in
"dev_list". The "extra_params" parameter, if given, should contain a string
with %()s fields in it for replacement by the named fields in each
device's dictionary.'''
strings = [] # this holds the strings to print. We sort before printing
print("\n%s" % title)
print("="*len(title))
if len(dev_list) == 0:
strings.append("<none>")
else:
for dev in dev_list:
if extra_params is not None:
strings.append("%s '%s' %s" % (dev["Slot"],
dev["Device_str"], extra_params % dev))
else:
strings.append("%s '%s'" % (dev["Slot"], dev["Device_str"]))
# sort before printing, so that the entries appear in PCI order
strings.sort()
print("\n".join(strings)) # print one per line
def show_status():
'''Function called when the script is passed the "--status" option.
Displays to the user what devices are bound to the igb_uio driver, the
kernel driver or to no driver'''
global dpdk_drivers
kernel_drv = []
dpdk_drv = []
no_drv = []
# split our list of devices into the three categories above
for d in devices.keys():
if not has_driver(d):
no_drv.append(devices[d])
continue
if devices[d]["Driver_str"] in dpdk_drivers:
dpdk_drv.append(devices[d])
else:
kernel_drv.append(devices[d])
# print each category separately, so we can clearly see what's used by DPDK
display_devices("Network devices using DPDK-compatible driver", dpdk_drv,
"drv=%(Driver_str)s unused=%(Module_str)s")
display_devices("Network devices using kernel driver", kernel_drv,
"if=%(Interface)s drv=%(Driver_str)s "
"unused=%(Module_str)s %(Active)s")
display_devices("Other network devices", no_drv, "unused=%(Module_str)s")
def parse_args():
'''Parses the command-line arguments given by the user and takes the
appropriate action for each'''
global b_flag
global status_flag
global force_flag
global args
if len(sys.argv) <= 1:
usage()
sys.exit(0)
try:
opts, args = getopt.getopt(sys.argv[1:], "b:us",
["help", "usage", "status", "force",
"bind=", "unbind"])
except getopt.GetoptError as error:
print(str(error))
print("Run '%s --usage' for further information" % sys.argv[0])
sys.exit(1)
for opt, arg in opts:
if opt == "--help" or opt == "--usage":
usage()
sys.exit(0)
if opt == "--status" or opt == "-s":
status_flag = True
if opt == "--force":
force_flag = True
if opt == "-b" or opt == "-u" or opt == "--bind" or opt == "--unbind":
if b_flag is not None:
print("Error - Only one bind or unbind may be specified\n")
sys.exit(1)
if opt == "-u" or opt == "--unbind":
b_flag = "none"
else:
b_flag = arg
def do_arg_actions():
'''do the actual action requested by the user'''
global b_flag
global status_flag
global force_flag
global args
if b_flag is None and not status_flag:
print("Error: No action specified for devices."
"Please give a -b or -u option")
print("Run '%s --usage' for further information" % sys.argv[0])
sys.exit(1)
if b_flag is not None and len(args) == 0:
print("Error: No devices specified.")
print("Run '%s --usage' for further information" % sys.argv[0])
sys.exit(1)
if b_flag == "none" or b_flag == "None":
unbind_all(args, force_flag)
elif b_flag is not None:
bind_all(args, b_flag, force_flag)
if status_flag:
if b_flag is not None:
get_nic_details() # refresh if we have changed anything
show_status()
def main():
'''program main function'''
parse_args()
check_modules()
get_nic_details()
do_arg_actions()
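
# The bind_one() logic above reduces to three sysfs writes. The helper below is
# an illustrative, self-contained sketch only -- it is not called anywhere in
# this script, the default device id / vendor / device values are hypothetical
# examples, and no error handling or driver restore is attempted.
def _sysfs_bind_sketch(dev_id="0000:02:00.0", vendor=0x8086,
                       device=0x10fb, driver="igb_uio"):
    """Minimal sketch of the unbind -> new_id -> bind sysfs sequence."""
    # 1. release the device from whatever kernel driver currently owns it
    with open("/sys/bus/pci/devices/%s/driver/unbind" % dev_id, "a") as f:
        f.write(dev_id)
    # 2. register the device's PCI ID with the target (DPDK) driver
    with open("/sys/bus/pci/drivers/%s/new_id" % driver, "w") as f:
        f.write("%04x %04x" % (vendor, device))
    # 3. bind the device to the target driver
    with open("/sys/bus/pci/drivers/%s/bind" % driver, "a") as f:
        f.write(dev_id)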
if __name__ == "__main__":
main()
| gpl-2.0 |
blacklin/kbengine | kbe/src/lib/python/Lib/test/test_parser.py | 113 | 26114 | import parser
import unittest
import sys
import operator
import struct
from test import support
from test.script_helper import assert_python_failure
#
# First, we test that we can generate trees from valid source fragments,
# and that these valid trees are indeed allowed by the tree-loading side
# of the parser module.
#
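#
# For orientation, the core roundtrip exercised by the first test case can be
# sketched outside of unittest roughly as follows (illustrative only, assuming
# the `parser` module is importable):
#
#     st = parser.suite("x = 1 + 2\n")
#     tree = st.totuple()
#     rebuilt = parser.sequence2st(tree)
#     assert tree == rebuilt.totuple()
#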
class RoundtripLegalSyntaxTestCase(unittest.TestCase):
def roundtrip(self, f, s):
st1 = f(s)
t = st1.totuple()
try:
st2 = parser.sequence2st(t)
except parser.ParserError as why:
self.fail("could not roundtrip %r: %s" % (s, why))
self.assertEqual(t, st2.totuple(),
"could not re-generate syntax tree")
def check_expr(self, s):
self.roundtrip(parser.expr, s)
def test_flags_passed(self):
        # The unicode literals flag has to be passed from the parser to AST
# generation.
suite = parser.suite("from __future__ import unicode_literals; x = ''")
code = suite.compile()
scope = {}
exec(code, {}, scope)
self.assertIsInstance(scope["x"], str)
def check_suite(self, s):
self.roundtrip(parser.suite, s)
def test_yield_statement(self):
self.check_suite("def f(): yield 1")
self.check_suite("def f(): yield")
self.check_suite("def f(): x += yield")
self.check_suite("def f(): x = yield 1")
self.check_suite("def f(): x = y = yield 1")
self.check_suite("def f(): x = yield")
self.check_suite("def f(): x = y = yield")
self.check_suite("def f(): 1 + (yield)*2")
self.check_suite("def f(): (yield 1)*2")
self.check_suite("def f(): return; yield 1")
self.check_suite("def f(): yield 1; return")
self.check_suite("def f(): yield from 1")
self.check_suite("def f(): x = yield from 1")
self.check_suite("def f(): f((yield from 1))")
self.check_suite("def f(): yield 1; return 1")
self.check_suite("def f():\n"
" for x in range(30):\n"
" yield x\n")
self.check_suite("def f():\n"
" if (yield):\n"
" yield x\n")
def test_nonlocal_statement(self):
self.check_suite("def f():\n"
" x = 0\n"
" def g():\n"
" nonlocal x\n")
self.check_suite("def f():\n"
" x = y = 0\n"
" def g():\n"
" nonlocal x, y\n")
def test_expressions(self):
self.check_expr("foo(1)")
self.check_expr("[1, 2, 3]")
self.check_expr("[x**3 for x in range(20)]")
self.check_expr("[x**3 for x in range(20) if x % 3]")
self.check_expr("[x**3 for x in range(20) if x % 2 if x % 3]")
self.check_expr("list(x**3 for x in range(20))")
self.check_expr("list(x**3 for x in range(20) if x % 3)")
self.check_expr("list(x**3 for x in range(20) if x % 2 if x % 3)")
self.check_expr("foo(*args)")
self.check_expr("foo(*args, **kw)")
self.check_expr("foo(**kw)")
self.check_expr("foo(key=value)")
self.check_expr("foo(key=value, *args)")
self.check_expr("foo(key=value, *args, **kw)")
self.check_expr("foo(key=value, **kw)")
self.check_expr("foo(a, b, c, *args)")
self.check_expr("foo(a, b, c, *args, **kw)")
self.check_expr("foo(a, b, c, **kw)")
self.check_expr("foo(a, *args, keyword=23)")
self.check_expr("foo + bar")
self.check_expr("foo - bar")
self.check_expr("foo * bar")
self.check_expr("foo / bar")
self.check_expr("foo // bar")
self.check_expr("lambda: 0")
self.check_expr("lambda x: 0")
self.check_expr("lambda *y: 0")
self.check_expr("lambda *y, **z: 0")
self.check_expr("lambda **z: 0")
self.check_expr("lambda x, y: 0")
self.check_expr("lambda foo=bar: 0")
self.check_expr("lambda foo=bar, spaz=nifty+spit: 0")
self.check_expr("lambda foo=bar, **z: 0")
self.check_expr("lambda foo=bar, blaz=blat+2, **z: 0")
self.check_expr("lambda foo=bar, blaz=blat+2, *y, **z: 0")
self.check_expr("lambda x, *y, **z: 0")
self.check_expr("(x for x in range(10))")
self.check_expr("foo(x for x in range(10))")
self.check_expr("...")
self.check_expr("a[...]")
def test_simple_expression(self):
# expr_stmt
self.check_suite("a")
def test_simple_assignments(self):
self.check_suite("a = b")
self.check_suite("a = b = c = d = e")
def test_simple_augmented_assignments(self):
self.check_suite("a += b")
self.check_suite("a -= b")
self.check_suite("a *= b")
self.check_suite("a /= b")
self.check_suite("a //= b")
self.check_suite("a %= b")
self.check_suite("a &= b")
self.check_suite("a |= b")
self.check_suite("a ^= b")
self.check_suite("a <<= b")
self.check_suite("a >>= b")
self.check_suite("a **= b")
def test_function_defs(self):
self.check_suite("def f(): pass")
self.check_suite("def f(*args): pass")
self.check_suite("def f(*args, **kw): pass")
self.check_suite("def f(**kw): pass")
self.check_suite("def f(foo=bar): pass")
self.check_suite("def f(foo=bar, *args): pass")
self.check_suite("def f(foo=bar, *args, **kw): pass")
self.check_suite("def f(foo=bar, **kw): pass")
self.check_suite("def f(a, b): pass")
self.check_suite("def f(a, b, *args): pass")
self.check_suite("def f(a, b, *args, **kw): pass")
self.check_suite("def f(a, b, **kw): pass")
self.check_suite("def f(a, b, foo=bar): pass")
self.check_suite("def f(a, b, foo=bar, *args): pass")
self.check_suite("def f(a, b, foo=bar, *args, **kw): pass")
self.check_suite("def f(a, b, foo=bar, **kw): pass")
self.check_suite("@staticmethod\n"
"def f(): pass")
self.check_suite("@staticmethod\n"
"@funcattrs(x, y)\n"
"def f(): pass")
self.check_suite("@funcattrs()\n"
"def f(): pass")
# keyword-only arguments
self.check_suite("def f(*, a): pass")
self.check_suite("def f(*, a = 5): pass")
self.check_suite("def f(*, a = 5, b): pass")
self.check_suite("def f(*, a, b = 5): pass")
self.check_suite("def f(*, a, b = 5, **kwds): pass")
self.check_suite("def f(*args, a): pass")
self.check_suite("def f(*args, a = 5): pass")
self.check_suite("def f(*args, a = 5, b): pass")
self.check_suite("def f(*args, a, b = 5): pass")
self.check_suite("def f(*args, a, b = 5, **kwds): pass")
# function annotations
self.check_suite("def f(a: int): pass")
self.check_suite("def f(a: int = 5): pass")
self.check_suite("def f(*args: list): pass")
self.check_suite("def f(**kwds: dict): pass")
self.check_suite("def f(*, a: int): pass")
self.check_suite("def f(*, a: int = 5): pass")
self.check_suite("def f() -> int: pass")
def test_class_defs(self):
self.check_suite("class foo():pass")
self.check_suite("class foo(object):pass")
self.check_suite("@class_decorator\n"
"class foo():pass")
self.check_suite("@class_decorator(arg)\n"
"class foo():pass")
self.check_suite("@decorator1\n"
"@decorator2\n"
"class foo():pass")
def test_import_from_statement(self):
self.check_suite("from sys.path import *")
self.check_suite("from sys.path import dirname")
self.check_suite("from sys.path import (dirname)")
self.check_suite("from sys.path import (dirname,)")
self.check_suite("from sys.path import dirname as my_dirname")
self.check_suite("from sys.path import (dirname as my_dirname)")
self.check_suite("from sys.path import (dirname as my_dirname,)")
self.check_suite("from sys.path import dirname, basename")
self.check_suite("from sys.path import (dirname, basename)")
self.check_suite("from sys.path import (dirname, basename,)")
self.check_suite(
"from sys.path import dirname as my_dirname, basename")
self.check_suite(
"from sys.path import (dirname as my_dirname, basename)")
self.check_suite(
"from sys.path import (dirname as my_dirname, basename,)")
self.check_suite(
"from sys.path import dirname, basename as my_basename")
self.check_suite(
"from sys.path import (dirname, basename as my_basename)")
self.check_suite(
"from sys.path import (dirname, basename as my_basename,)")
self.check_suite("from .bogus import x")
def test_basic_import_statement(self):
self.check_suite("import sys")
self.check_suite("import sys as system")
self.check_suite("import sys, math")
self.check_suite("import sys as system, math")
self.check_suite("import sys, math as my_math")
def test_relative_imports(self):
self.check_suite("from . import name")
self.check_suite("from .. import name")
# check all the way up to '....', since '...' is tokenized
# differently from '.' (it's an ellipsis token).
self.check_suite("from ... import name")
self.check_suite("from .... import name")
self.check_suite("from .pkg import name")
self.check_suite("from ..pkg import name")
self.check_suite("from ...pkg import name")
self.check_suite("from ....pkg import name")
def test_pep263(self):
self.check_suite("# -*- coding: iso-8859-1 -*-\n"
"pass\n")
def test_assert(self):
self.check_suite("assert alo < ahi and blo < bhi\n")
def test_with(self):
self.check_suite("with open('x'): pass\n")
self.check_suite("with open('x') as f: pass\n")
self.check_suite("with open('x') as f, open('y') as g: pass\n")
def test_try_stmt(self):
self.check_suite("try: pass\nexcept: pass\n")
self.check_suite("try: pass\nfinally: pass\n")
self.check_suite("try: pass\nexcept A: pass\nfinally: pass\n")
self.check_suite("try: pass\nexcept A: pass\nexcept: pass\n"
"finally: pass\n")
self.check_suite("try: pass\nexcept: pass\nelse: pass\n")
self.check_suite("try: pass\nexcept: pass\nelse: pass\n"
"finally: pass\n")
def test_position(self):
# An absolutely minimal test of position information. Better
# tests would be a big project.
code = "def f(x):\n return x + 1"
st1 = parser.suite(code)
st2 = st1.totuple(line_info=1, col_info=1)
def walk(tree):
node_type = tree[0]
next = tree[1]
if isinstance(next, tuple):
for elt in tree[1:]:
for x in walk(elt):
yield x
else:
yield tree
terminals = list(walk(st2))
self.assertEqual([
(1, 'def', 1, 0),
(1, 'f', 1, 4),
(7, '(', 1, 5),
(1, 'x', 1, 6),
(8, ')', 1, 7),
(11, ':', 1, 8),
(4, '', 1, 9),
(5, '', 2, -1),
(1, 'return', 2, 4),
(1, 'x', 2, 11),
(14, '+', 2, 13),
(2, '1', 2, 15),
(4, '', 2, 16),
(6, '', 2, -1),
(4, '', 2, -1),
(0, '', 2, -1)],
terminals)
def test_extended_unpacking(self):
self.check_suite("*a = y")
self.check_suite("x, *b, = m")
self.check_suite("[*a, *b] = y")
self.check_suite("for [*x, b] in x: pass")
def test_raise_statement(self):
self.check_suite("raise\n")
self.check_suite("raise e\n")
self.check_suite("try:\n"
" suite\n"
"except Exception as e:\n"
" raise ValueError from e\n")
def test_set_displays(self):
self.check_expr('{2}')
self.check_expr('{2,}')
self.check_expr('{2, 3}')
self.check_expr('{2, 3,}')
def test_dict_displays(self):
self.check_expr('{}')
self.check_expr('{a:b}')
self.check_expr('{a:b,}')
self.check_expr('{a:b, c:d}')
self.check_expr('{a:b, c:d,}')
def test_set_comprehensions(self):
self.check_expr('{x for x in seq}')
self.check_expr('{f(x) for x in seq}')
self.check_expr('{f(x) for x in seq if condition(x)}')
def test_dict_comprehensions(self):
self.check_expr('{x:x for x in seq}')
self.check_expr('{x**2:x[3] for x in seq if condition(x)}')
self.check_expr('{x:x for x in seq1 for y in seq2 if condition(x, y)}')
#
# Second, we take *invalid* trees and make sure we get ParserError
# rejections for them.
#
class IllegalSyntaxTestCase(unittest.TestCase):
def check_bad_tree(self, tree, label):
try:
parser.sequence2st(tree)
except parser.ParserError:
pass
else:
self.fail("did not detect invalid tree for %r" % label)
def test_junk(self):
# not even remotely valid:
self.check_bad_tree((1, 2, 3), "<junk>")
def test_illegal_yield_1(self):
# Illegal yield statement: def f(): return 1; yield 1
tree = \
(257,
(264,
(285,
(259,
(1, 'def'),
(1, 'f'),
(260, (7, '('), (8, ')')),
(11, ':'),
(291,
(4, ''),
(5, ''),
(264,
(265,
(266,
(272,
(275,
(1, 'return'),
(313,
(292,
(293,
(294,
(295,
(297,
(298,
(299,
(300,
(301,
(302, (303, (304, (305, (2, '1')))))))))))))))))),
(264,
(265,
(266,
(272,
(276,
(1, 'yield'),
(313,
(292,
(293,
(294,
(295,
(297,
(298,
(299,
(300,
(301,
(302,
(303, (304, (305, (2, '1')))))))))))))))))),
(4, ''))),
(6, ''))))),
(4, ''),
(0, ''))))
self.check_bad_tree(tree, "def f():\n return 1\n yield 1")
def test_illegal_yield_2(self):
# Illegal return in generator: def f(): return 1; yield 1
tree = \
(257,
(264,
(265,
(266,
(278,
(1, 'from'),
(281, (1, '__future__')),
(1, 'import'),
(279, (1, 'generators')))),
(4, ''))),
(264,
(285,
(259,
(1, 'def'),
(1, 'f'),
(260, (7, '('), (8, ')')),
(11, ':'),
(291,
(4, ''),
(5, ''),
(264,
(265,
(266,
(272,
(275,
(1, 'return'),
(313,
(292,
(293,
(294,
(295,
(297,
(298,
(299,
(300,
(301,
(302, (303, (304, (305, (2, '1')))))))))))))))))),
(264,
(265,
(266,
(272,
(276,
(1, 'yield'),
(313,
(292,
(293,
(294,
(295,
(297,
(298,
(299,
(300,
(301,
(302,
(303, (304, (305, (2, '1')))))))))))))))))),
(4, ''))),
(6, ''))))),
(4, ''),
(0, ''))))
self.check_bad_tree(tree, "def f():\n return 1\n yield 1")
def test_a_comma_comma_c(self):
# Illegal input: a,,c
tree = \
(258,
(311,
(290,
(291,
(292,
(293,
(295,
(296,
(297,
(298, (299, (300, (301, (302, (303, (1, 'a')))))))))))))),
(12, ','),
(12, ','),
(290,
(291,
(292,
(293,
(295,
(296,
(297,
(298, (299, (300, (301, (302, (303, (1, 'c'))))))))))))))),
(4, ''),
(0, ''))
self.check_bad_tree(tree, "a,,c")
def test_illegal_operator(self):
# Illegal input: a $= b
tree = \
(257,
(264,
(265,
(266,
(267,
(312,
(291,
(292,
(293,
(294,
(296,
(297,
(298,
(299,
(300, (301, (302, (303, (304, (1, 'a'))))))))))))))),
(268, (37, '$=')),
(312,
(291,
(292,
(293,
(294,
(296,
(297,
(298,
(299,
(300, (301, (302, (303, (304, (1, 'b'))))))))))))))))),
(4, ''))),
(0, ''))
self.check_bad_tree(tree, "a $= b")
def test_malformed_global(self):
        # doesn't have the global keyword in the AST
tree = (257,
(264,
(265,
(266,
(282, (1, 'foo'))), (4, ''))),
(4, ''),
(0, ''))
self.check_bad_tree(tree, "malformed global ast")
def test_missing_import_source(self):
# from import fred
tree = \
(257,
(268,
(269,
(270,
(282,
(284, (1, 'from'), (1, 'import'),
(287, (285, (1, 'fred')))))),
(4, ''))),
(4, ''), (0, ''))
self.check_bad_tree(tree, "from import fred")
class CompileTestCase(unittest.TestCase):
# These tests are very minimal. :-(
def test_compile_expr(self):
st = parser.expr('2 + 3')
code = parser.compilest(st)
self.assertEqual(eval(code), 5)
def test_compile_suite(self):
st = parser.suite('x = 2; y = x + 3')
code = parser.compilest(st)
globs = {}
exec(code, globs)
self.assertEqual(globs['y'], 5)
def test_compile_error(self):
st = parser.suite('1 = 3 + 4')
self.assertRaises(SyntaxError, parser.compilest, st)
def test_compile_badunicode(self):
st = parser.suite('a = "\\U12345678"')
self.assertRaises(SyntaxError, parser.compilest, st)
st = parser.suite('a = "\\u1"')
self.assertRaises(SyntaxError, parser.compilest, st)
def test_issue_9011(self):
        # Issue 9011: compilation of a unary minus expression changed
# the meaning of the ST, so that a second compilation produced
# incorrect results.
st = parser.expr('-3')
code1 = parser.compilest(st)
self.assertEqual(eval(code1), -3)
code2 = parser.compilest(st)
self.assertEqual(eval(code2), -3)
class ParserStackLimitTestCase(unittest.TestCase):
"""try to push the parser to/over its limits.
see http://bugs.python.org/issue1881 for a discussion
"""
def _nested_expression(self, level):
return "["*level+"]"*level
def test_deeply_nested_list(self):
# XXX used to be 99 levels in 2.x
e = self._nested_expression(93)
st = parser.expr(e)
st.compile()
def test_trigger_memory_error(self):
e = self._nested_expression(100)
rc, out, err = assert_python_failure('-c', e)
# parsing the expression will result in an error message
# followed by a MemoryError (see #11963)
self.assertIn(b's_push: parser stack overflow', err)
self.assertIn(b'MemoryError', err)
class STObjectTestCase(unittest.TestCase):
"""Test operations on ST objects themselves"""
def test_comparisons(self):
# ST objects should support order and equality comparisons
st1 = parser.expr('2 + 3')
st2 = parser.suite('x = 2; y = x + 3')
st3 = parser.expr('list(x**3 for x in range(20))')
st1_copy = parser.expr('2 + 3')
st2_copy = parser.suite('x = 2; y = x + 3')
st3_copy = parser.expr('list(x**3 for x in range(20))')
# exercise fast path for object identity
self.assertEqual(st1 == st1, True)
self.assertEqual(st2 == st2, True)
self.assertEqual(st3 == st3, True)
# slow path equality
self.assertEqual(st1, st1_copy)
self.assertEqual(st2, st2_copy)
self.assertEqual(st3, st3_copy)
self.assertEqual(st1 == st2, False)
self.assertEqual(st1 == st3, False)
self.assertEqual(st2 == st3, False)
self.assertEqual(st1 != st1, False)
self.assertEqual(st2 != st2, False)
self.assertEqual(st3 != st3, False)
self.assertEqual(st1 != st1_copy, False)
self.assertEqual(st2 != st2_copy, False)
self.assertEqual(st3 != st3_copy, False)
self.assertEqual(st2 != st1, True)
self.assertEqual(st1 != st3, True)
self.assertEqual(st3 != st2, True)
# we don't particularly care what the ordering is; just that
# it's usable and self-consistent
self.assertEqual(st1 < st2, not (st2 <= st1))
self.assertEqual(st1 < st3, not (st3 <= st1))
self.assertEqual(st2 < st3, not (st3 <= st2))
self.assertEqual(st1 < st2, st2 > st1)
self.assertEqual(st1 < st3, st3 > st1)
self.assertEqual(st2 < st3, st3 > st2)
self.assertEqual(st1 <= st2, st2 >= st1)
self.assertEqual(st3 <= st1, st1 >= st3)
self.assertEqual(st2 <= st3, st3 >= st2)
# transitivity
bottom = min(st1, st2, st3)
top = max(st1, st2, st3)
mid = sorted([st1, st2, st3])[1]
self.assertTrue(bottom < mid)
self.assertTrue(bottom < top)
self.assertTrue(mid < top)
self.assertTrue(bottom <= mid)
self.assertTrue(bottom <= top)
self.assertTrue(mid <= top)
self.assertTrue(bottom <= bottom)
self.assertTrue(mid <= mid)
self.assertTrue(top <= top)
# interaction with other types
self.assertEqual(st1 == 1588.602459, False)
self.assertEqual('spanish armada' != st2, True)
self.assertRaises(TypeError, operator.ge, st3, None)
self.assertRaises(TypeError, operator.le, False, st1)
self.assertRaises(TypeError, operator.lt, st1, 1815)
self.assertRaises(TypeError, operator.gt, b'waterloo', st2)
check_sizeof = support.check_sizeof
@support.cpython_only
def test_sizeof(self):
def XXXROUNDUP(n):
if n <= 1:
return n
if n <= 128:
return (n + 3) & ~3
return 1 << (n - 1).bit_length()
basesize = support.calcobjsize('Pii')
nodesize = struct.calcsize('hP3iP0h')
def sizeofchildren(node):
if node is None:
return 0
res = 0
hasstr = len(node) > 1 and isinstance(node[-1], str)
if hasstr:
res += len(node[-1]) + 1
children = node[1:-1] if hasstr else node[1:]
if children:
res += XXXROUNDUP(len(children)) * nodesize
for child in children:
res += sizeofchildren(child)
return res
def check_st_sizeof(st):
self.check_sizeof(st, basesize + nodesize +
sizeofchildren(st.totuple()))
check_st_sizeof(parser.expr('2 + 3'))
check_st_sizeof(parser.expr('2 + 3 + 4'))
check_st_sizeof(parser.suite('x = 2 + 3'))
check_st_sizeof(parser.suite(''))
check_st_sizeof(parser.suite('# -*- coding: utf-8 -*-'))
check_st_sizeof(parser.expr('[' + '2,' * 1000 + ']'))
# XXX tests for pickling and unpickling of ST objects should go here
class OtherParserCase(unittest.TestCase):
def test_two_args_to_expr(self):
# See bug #12264
with self.assertRaises(TypeError):
parser.expr("a", "b")
def test_main():
support.run_unittest(
RoundtripLegalSyntaxTestCase,
IllegalSyntaxTestCase,
CompileTestCase,
ParserStackLimitTestCase,
STObjectTestCase,
OtherParserCase,
)
if __name__ == "__main__":
test_main()
| lgpl-3.0 |
kritak/textdungeon | Internal/pricerandomtester.py | 1 | 1114 | """Testing the random frequency of items based on item price.
A cheap item is more common; an expensive item is very rare."""
import random
d = {"healing":50,
"berserk":60,
"clever":100,
"swiftness":100,
"might":100,
"awesomeness":500,
}
# reverse d
dr = [[1/b,a] for [a,b] in d.items()] # list of [1/price (selection weight), drinkname]
dr.sort() # sort this list by price
pricelist1 = [a for [a,b] in dr] # list of selection weights only
drinklist = [b for [a,b] in dr] # list of drinkname only
pricelist2 = [] # list of cumulative (added-up) weights
kprice = 0
for p in pricelist1:
kprice += p
pricelist2.append(kprice)
print(pricelist1, pricelist2)
result = {}
print("calculating please wait...")
for x in range(10000):
    y = random.random()*(pricelist2[-1]) # 0 to total cumulative weight
for p in pricelist2:
if y < p:
drinkname = drinklist[pricelist2.index(p)]
if drinkname in result:
result[drinkname] += 1
else:
result[drinkname] = 1
break
print(result)
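
# For comparison, the same 1/price weighting can be expressed directly with
# random.choices() (available from Python 3.6). This helper is only an
# illustrative sketch and is not called by the script above.
def weighted_pick(prices, count=10000):
    """Return a {drinkname: hits} dict, using 1/price as the selection weight."""
    names = list(prices)
    weights = [1 / prices[name] for name in names]
    result = {}
    for name in random.choices(names, weights=weights, k=count):
        result[name] = result.get(name, 0) + 1
    return result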
| gpl-2.0 |
developerfm/zulip | api/integrations/hg/zulip-changegroup.py | 114 | 6096 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Zulip hook for Mercurial changeset pushes.
# Copyright © 2012-2014 Zulip, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
#
# This hook is called when changesets are pushed to the master repository (ie
# `hg push`). See https://zulip.com/integrations for installation instructions.
import zulip
VERSION = "0.9"
def format_summary_line(web_url, user, base, tip, branch, node):
"""
Format the first line of the message, which contains summary
information about the changeset and links to the changelog if a
web URL has been configured:
Jane Doe <[email protected]> pushed 1 commit to master (170:e494a5be3393):
"""
revcount = tip - base
plural = "s" if revcount > 1 else ""
if web_url:
shortlog_base_url = web_url.rstrip("/") + "/shortlog/"
summary_url = "{shortlog}{tip}?revcount={revcount}".format(
shortlog=shortlog_base_url, tip=tip - 1, revcount=revcount)
formatted_commit_count = "[{revcount} commit{s}]({url})".format(
revcount=revcount, s=plural, url=summary_url)
else:
formatted_commit_count = "{revcount} commit{s}".format(
revcount=revcount, s=plural)
return u"**{user}** pushed {commits} to **{branch}** (`{tip}:{node}`):\n\n".format(
user=user, commits=formatted_commit_count, branch=branch, tip=tip,
node=node[:12])
def format_commit_lines(web_url, repo, base, tip):
"""
Format the per-commit information for the message, including the one-line
commit summary and a link to the diff if a web URL has been configured:
"""
if web_url:
rev_base_url = web_url.rstrip("/") + "/rev/"
commit_summaries = []
for rev in range(base, tip):
rev_node = repo.changelog.node(rev)
rev_ctx = repo.changectx(rev_node)
one_liner = rev_ctx.description().split("\n")[0]
if web_url:
summary_url = rev_base_url + str(rev_ctx)
summary = "* [{summary}]({url})".format(
summary=one_liner, url=summary_url)
else:
summary = "* {summary}".format(summary=one_liner)
commit_summaries.append(summary)
return "\n".join(summary for summary in commit_summaries)
def send_zulip(email, api_key, site, stream, subject, content):
"""
Send a message to Zulip using the provided credentials, which should be for
a bot in most cases.
"""
client = zulip.Client(email=email, api_key=api_key,
site=site,
client="ZulipMercurial/" + VERSION)
message_data = {
"type": "stream",
"to": stream,
"subject": subject,
"content": content,
}
client.send_message(message_data)
def get_config(ui, item):
try:
# configlist returns everything in lists.
return ui.configlist('zulip', item)[0]
except IndexError:
return None
def hook(ui, repo, **kwargs):
"""
Invoked by configuring a [hook] entry in .hg/hgrc.
"""
hooktype = kwargs["hooktype"]
node = kwargs["node"]
ui.debug("Zulip: received {hooktype} event\n".format(hooktype=hooktype))
if hooktype != "changegroup":
ui.warn("Zulip: {hooktype} not supported\n".format(hooktype=hooktype))
exit(1)
ctx = repo.changectx(node)
branch = ctx.branch()
# If `branches` isn't specified, notify on all branches.
branch_whitelist = get_config(ui, "branches")
branch_blacklist = get_config(ui, "ignore_branches")
if branch_whitelist:
# Only send notifications on branches we are watching.
watched_branches = [b.lower().strip() for b in branch_whitelist.split(",")]
if branch.lower() not in watched_branches:
ui.debug("Zulip: ignoring event for {branch}\n".format(branch=branch))
exit(0)
if branch_blacklist:
# Don't send notifications for branches we've ignored.
ignored_branches = [b.lower().strip() for b in branch_blacklist.split(",")]
if branch.lower() in ignored_branches:
ui.debug("Zulip: ignoring event for {branch}\n".format(branch=branch))
exit(0)
# The first and final commits in the changeset.
base = repo[node].rev()
tip = len(repo)
email = get_config(ui, "email")
api_key = get_config(ui, "api_key")
site = get_config(ui, "site")
if not (email and api_key):
ui.warn("Zulip: missing email or api_key configurations\n")
ui.warn("in the [zulip] section of your .hg/hgrc.\n")
exit(1)
stream = get_config(ui, "stream")
# Give a default stream if one isn't provided.
if not stream:
stream = "commits"
web_url = get_config(ui, "web_url")
user = ctx.user()
content = format_summary_line(web_url, user, base, tip, branch, node)
content += format_commit_lines(web_url, repo, base, tip)
subject = branch
ui.debug("Sending to Zulip:\n")
ui.debug(content + "\n")
send_zulip(email, api_key, site, stream, subject, content)
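
# A hypothetical .hg/hgrc configuration for this hook might look like the
# following. Paths, credentials, server address and stream names below are
# placeholders, not values taken from this file; only the section and key
# names mirror what get_config() reads above.
#
#     [hooks]
#     changegroup = python:/path/to/zulip-changegroup.py:hook
#
#     [zulip]
#     email = [email protected]
#     api_key = 0123456789abcdef
#     site = https://zulip.example.com
#     stream = commits
#     branches = default,stable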
| apache-2.0 |
naototty/pyflag | src/plugins_old/MemoryForensics/VolatilityCommon.py | 7 | 3679 | import os,sys
import pyflag.IO as IO
import pyflag.FlagFramework as FlagFramework
## Find and insert the volatility modules
volatility_path = None
for d in os.listdir(os.path.dirname(__file__)):
if d.startswith("Volatility-1.3"):
## Check that volatility is actually in there
path = os.path.join(os.path.dirname(__file__),d)
if os.access(os.path.join(path,"vtypes.py"),os.F_OK):
volatility_path = path
break
## We need to make sure that we get in before an older version
if volatility_path and volatility_path not in sys.path:
sys.path.insert(0,volatility_path)
import forensics.addrspace
## This is a big hack because Volatility is difficult to work with -
## we want to pass Volatility an already made address space but there
## is no way to do this. Volatility calls the FileAddressSpace in
## multiple places and actually tries to open the raw file several
## times. We would essentially need to recode all the volatility
## functions to accept a ready address space.
## But after all, this is python so we can do lots of magic. We
## basically dynamically change the FileAddressSpace definition in
## volatility itself (which normally accepts a filename) to accept an
## IOSource name, then we effectively call it with the name as an
## image name. When volatility tries to open the said filename, it
## will be transparently opening a PyFlag iosource of our choosing.
try:
forensics.addrspace.FileAddressSpace.case
except AttributeError:
    ## Only do this if it's not done already
class IOSourceAddressSpace(forensics.addrspace.FileAddressSpace):
case = None
iosource = None
def __init__(self, name, mode='rb', fast=False):
self.case, self.iosource = name.split("/",1)
fd = IO.open(self.case, self.iosource)
self.fhandle = fd
self.fsize = fd.size
self.fast_fhandle = fd
self.fname = name
self.name = name
## Patch it in:
forensics.addrspace.FileAddressSpace = IOSourceAddressSpace
## We need to reimplement these functions in a sane way (Currently
## they try to open the file directly)
import vutils
def is_crash_dump(filename):
fd = forensics.addrspace.FileAddressSpace(filename)
if fd.read(0, 8) == "PAGEDUMP":
return True
return False
def is_hiberfil(filename):
fd = forensics.addrspace.FileAddressSpace(filename)
if fd.read(0, 4) == 'hibr':
return True
return False
vutils.is_crash_dump = is_crash_dump
vutils.is_hiberfil = is_hiberfil
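# After the monkey-patch above, any Volatility code that instantiates
# FileAddressSpace(name) can transparently read from a PyFlag IO source
# instead of a raw file. A hypothetical illustration (the case and iosource
# names are made up):
#
#     addr_space = forensics.addrspace.FileAddressSpace("demo_case/memory_image")
#     header = addr_space.read(0, 8)   # "PAGEDUMP" would indicate a crash dump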
## Make sure we initialise Volatility plugins
import forensics.registry as MemoryRegistry
MemoryRegistry.Init()
## These are common column types
from pyflag.ColumnTypes import BigIntegerType
class MemoryOffset(BigIntegerType):
inode_id_column = "Inode"
""" A column type to link to the offset of an inode """
def plain_display_hook(self, value, row, result):
offset = int(value)
inode_id = row[self.inode_id_column]
target = FlagFramework.query_type(family="Disk Forensics",
report="ViewFile",
offset=value,
inode_id=inode_id,
case=self.case,
mode="HexDump")
try:
target['_prebuffer'] = self.prebuffer
except AttributeError: pass
result.link("0x%08X" % offset, target=target, pane='new')
display_hooks = [ plain_display_hook ]
| gpl-2.0 |
SPARLab/BikeMaps | mapApp/views/__init__.py | 1 | 1138 | from .about import about, contact
from .alerts import alertUsers, postAlertPolygon, readAlertPoint
from .disclaimer import disclaimer
from .edit import editHazards, editShape, updateHazard
from .index import index
from .postPoint import (postHazard, postIncident, postNearmiss,
postNewInfrastructure, postTheft)
from .pushNotification import pushNotification
from .recentReports import recentReports
from .restApi import (AlertAreaDetail, AlertAreaList, APNSDeviceDetail,
APNSDeviceList, CollisionList, FilteredHazardList,
FilteredTheftList, GCMDeviceDetail, GCMDeviceList,
HazardList, IncidentList, NearmissList, OfficialList,
TheftList, TinyCollisionList, TinyHazardList,
TinyNearMissList, TinyNewInfrastructureList,
TinyTheftList, UserDetail, UserList, XHRCollisionInfo,
XHRHazardInfo, XHRNearMissInfo, XHRNewInfrastructureInfo,
XHRTheftInfo)
from .termsAndConditions import termsAndConditions
from .vis import vis
| mit |
JulianAtGitHub/CocosBuilderExtend | CocosBuilder/libs/cocos2d-iphone/tools/template_generator.py | 46 | 8351 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
# Xcode 4 template generator for cocos2d project
# (c) 2011 Ricardo Quesada
#
# LICENSE: Dual License: MIT & GNU GPL v2. Whatever suits you best.
#
# Given a directory, it generates the "Definitions" and "Nodes" elements
#
# Format taken from: http://blog.boreal-kiss.net/2011/03/11/a-minimal-project-template-for-xcode-4/
# ----------------------------------------------------------------------------
'''
Xcode 4 template generator
'''
__docformat__ = 'restructuredtext'
_template_open_body = """<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<!-- FILE AUTOGENERATED BY cocos2d/tools/template_generator.py DO NOT EDIT -->
<plist version="1.0">
<dict>
<key>Description</key>
<string>This is a template description.</string>
<key>Identifier</key>
<string>com.cocos2d-v2.%s</string>
<key>Kind</key>
<string>Xcode.Xcode3.ProjectTemplateUnitKind</string>"""
_template_close_body = "</dict>\n</plist>"
_template_header_path= """<key>Targets</key>
<array>
<dict>
<key>SharedSettings</key>
<dict>
<key>HEADER_SEARCH_PATHS</key>
<string>%s</string>
</dict>
</dict>
</array>"""
_template_user_header_path= """<key>Targets</key>
<array>
<dict>
<key>SharedSettings</key>
<dict>
<key>ALWAYS_SEARCH_USER_PATHS</key>
<string>YES</string>
<key>USER_HEADER_SEARCH_PATHS</key>
<string>%s</string>
</dict>
</dict>
</array>"""
_template_ancestor = """ <key>Ancestors</key>
<array>
<string>%s</string>
</array>"""
# python
import sys
import os
import getopt
import glob
class Xcode4Template(object):
def __init__(self, directory, group=0, identifier="XXX", header_path=None, user_header_path=None, ancestor=None):
self.directory = directory
self.files_to_include = []
self.wildcard = '*'
self.ignore_extensions = ['h','txt','html','patch','cmake', 'py', 'markdown', 'md', 'graffle', 'sh', 'ini', 'bridgesupport', 'tbl', 'msg']
self.ignore_directories = ['docs', 'html']
self.group_start_index = group # eg: if 1 then libs/cocos2d/support -> ["cocos2d", "support"] ignoring "libs"
self.output = []
self.identifier = identifier
self.header_path = header_path
self.user_header_path = user_header_path
self.ancestor = ancestor
def scandirs(self, path):
for currentFile in glob.glob(os.path.join(path, self.wildcard)):
if os.path.isdir(currentFile):
self.scandirs(currentFile)
else:
self.files_to_include.append(currentFile)
#
# append the definitions
#
def append_definition(self, output_body, path, group, dont_index):
output_body.append("\t\t<key>%s</key>" % path)
output_body.append("\t\t<dict>")
if group:
output_body.append("\t\t\t<key>Group</key>")
output_body.append("\t\t\t<array>")
for g in group:
output_body.append("\t\t\t\t<string>%s</string>" % g)
output_body.append("\t\t\t</array>")
output_body.append("\t\t\t<key>Path</key>\n\t\t\t<string>%s</string>" % path)
if dont_index:
output_body.append("\t\t\t<key>TargetIndices</key>\n\t\t\t<array/>")
output_body.append("\t\t</dict>")
#
# Generate the "Definitions" section
#
def generate_definitions(self):
output_header = "\t<key>Definitions</key>"
output_dict_open = "\t<dict>"
output_dict_close = "\t</dict>"
output_body = []
for path in self.files_to_include:
# group name
group = []
# obtain group name from directory
dirs = os.path.dirname(path)
lastdir = dirs.split(os.path.sep)[-1]
if lastdir in self.ignore_directories:
sys.stderr.write('Ignoring definition: "%s" because it is in directory: "%s"\n' % (os.path.basename(path), lastdir))
continue
group = dirs.split('/')
group = group[self.group_start_index:]
# get the extension
filename = os.path.basename(path)
name_extension= filename.split('.')
extension = None
if len(name_extension) == 2:
extension = name_extension[1]
self.append_definition(output_body, path, group, extension in self.ignore_extensions)
self.output.append(output_header)
self.output.append(output_dict_open)
self.output.append("\n".join(output_body))
self.output.append(output_dict_close)
#
# Generates the "Nodes" section
#
def generate_nodes(self):
output_header = "\t<key>Nodes</key>"
output_open = "\t<array>"
output_close = "\t</array>"
output_body = []
for path in self.files_to_include:
lastdir = os.path.dirname(path).split(os.path.sep)[-1]
if lastdir in self.ignore_directories:
sys.stderr.write('Ignoring node: "%s" because it is in directory: "%s"\n' % (os.path.basename(path), lastdir))
continue
output_body.append("\t\t<string>%s</string>" % path)
self.output.append(output_header)
self.output.append(output_open)
self.output.append("\n".join(output_body))
self.output.append(output_close)
#
# Generate ancestors
#
def generate_ancestor(self):
if self.ancestor:
self.output.append(_template_ancestor % self.ancestor)
#
# Generates the include directory
#
def generate_header_path(self):
if self.header_path:
self.output.append(_template_header_path % self.header_path)
if self.user_header_path:
self.output.append(_template_user_header_path % self.user_header_path)
#
# Generates the plist. Send it to to stdout
#
def generate_xml(self):
self.output.append(_template_open_body % self.identifier)
self.generate_ancestor()
self.generate_definitions()
self.generate_nodes()
self.generate_header_path()
self.output.append(_template_close_body)
print "\n".join(self.output)
def generate(self):
self.scandirs(self.directory)
self.generate_xml()
def help():
print "%s v1.1 - An utility to generate Xcode 4 templates" % sys.argv[0]
print "Usage:"
print "-g --group\t\tdirectory_used_as_starting_group (if 1, then 'libs/cocos2d/Support/' -> ['cocos2d','Support'] ignoring 'libs')"
print "-i --identifier\t\tidentifier (Xcode4 template identifier)"
print "-a --ancestor\t\tancestor identifier. Default: none"
print "--header-path\t\theader search path"
print "--user-header-path\tuser header search path"
print "directory_to_parse"
print "\nExample:"
print "\t%s -i kazmathlib --header-path ___PACKAGENAME___/libs/kazmath/include libs" % sys.argv[0]
print "\t%s -i cocos2dlib libs" % sys.argv[0]
sys.exit(-1)
if __name__ == "__main__":
if len(sys.argv) == 1:
help()
directory = None
group = 0
identifier = None
header_path= None
user_header_path= None
ancestor = None
argv = sys.argv[1:]
try:
opts, args = getopt.getopt(argv, "a:g:i:", ["ancestor=","group=","identifier=","header-path=", "user-header-path="])
if len(args) == 0:
help()
for opt, arg in opts:
if opt in ("-g","--group"):
group = arg
if opt in ("-i","--identifier"):
identifier = arg
if opt in ["--header-path"]:
header_path= arg
if opt in ["--user-header-path"]:
user_header_path= arg
if opt in ("-a", "--ancestor"):
ancestor = arg
except getopt.GetoptError,e:
print e
directory = args[0]
if directory == None:
help()
gen = Xcode4Template(directory=directory, group=int(group), identifier=identifier, header_path=header_path, user_header_path=user_header_path, ancestor=ancestor)
gen.generate()
| mit |
unt-libraries/django-name | name/api/serializers.py | 1 | 6208 | """Serializers for the Name App Models.
This module leverages the Django Rest Framework's Serializer
components to build JSON representations of the models defined
in this app.
These JSON representations are designed to be backwards compatible
with the API documented in previous versions.
For documentation regarding the Django Rest Framework Serializers go
to http://www.django-rest-framework.org/api-guide/serializers/
"""
from rest_framework import serializers
from .. import models
class IdentifierSerializer(serializers.ModelSerializer):
"""Serializer for the Identifier Model.
The following fields have been renamed for backwards compatibility
with previous versions of the API.
label -> identifier.type
href -> identifier.value
"""
label = serializers.StringRelatedField(source='type')
href = serializers.CharField(source='value')
class Meta:
model = models.Identifier
fields = ('label', 'href')
class NoteSerializer(serializers.ModelSerializer):
"""Serializer for the Note Model."""
type = serializers.SerializerMethodField()
class Meta:
model = models.Note
fields = ('note', 'type')
def get_type(self, obj):
"""Sets the type field.
Returns the Note Type label, instead of the Note Type ID, which
is the default behavior.
"""
return obj.get_note_type_label().lower()
class VariantSerializer(serializers.ModelSerializer):
"""Serializer for the Variant Model."""
type = serializers.SerializerMethodField()
class Meta:
model = models.Variant
fields = ('variant', 'type')
def get_type(self, obj):
"""Sets the type field.
Returns the Variant Type label, instead of the Variant Type ID,
which is the default behavior.
"""
return obj.get_variant_type_label().lower()
class NameSerializer(serializers.ModelSerializer):
"""Serializer for the Name Model.
This serializes the the Name model to include detailed information
about the object, including the related Variants, Notes, and
Identifiers.
The following fields have been renamed for backwards compatibility
with previous versions of the API.
authoritative_name -> name.name
begin_date -> name.begin
end_date -> name.end
The identifier field is the absolute url to the name detail
page for the model instance.
"""
authoritative_name = serializers.CharField(source='name')
begin_date = serializers.CharField(source='begin')
name_type = serializers.SerializerMethodField()
end_date = serializers.CharField(source='end')
links = IdentifierSerializer(many=True, source='identifier_set')
notes = NoteSerializer(many=True, source='note_set')
variants = VariantSerializer(many=True, source='variant_set')
identifier = serializers.HyperlinkedIdentityField(
view_name='name:detail', lookup_field='name_id')
class Meta:
model = models.Name
fields = ('authoritative_name', 'name_type', 'begin_date', 'end_date',
'identifier', 'links', 'notes', 'variants',)
def get_name_type(self, obj):
"""Sets the name_type field.
Returns the Name Type label, instead of the Name Type ID, which
is the default behavior.
"""
return obj.get_name_type_label().lower()
class NameSearchSerializer(serializers.ModelSerializer):
"""Name Model Serializer for the Name search/autocompletion
endpoint.
The following fields have been renamed for backwards compatibility
with previous versions of the API.
begin_date -> name.begin
type -> name.get_name_type_label()
label -> Formats name.name and name.disambiguation.
The URL field is the absolute url to the name detail page for
the model instance.
"""
begin_date = serializers.CharField(source='begin')
type = serializers.SerializerMethodField()
label = serializers.SerializerMethodField()
URL = serializers.HyperlinkedIdentityField(
view_name='name:detail', lookup_field='name_id')
class Meta:
model = models.Name
fields = ('id', 'name', 'label', 'type', 'begin_date',
'disambiguation', 'URL')
def get_type(self, obj):
"""Sets the type field.
Returns the Name Type label, instead of the Name Type ID, which
is the default behavior.
"""
return obj.get_name_type_label().lower()
def get_label(self, obj):
"""Sets the label field.
Returns a string in the form of
"<name.name> (<name.disambiguation>)"
"""
if obj.disambiguation:
return '{0} ({1})'.format(obj.name, obj.disambiguation)
return obj.name
class LocationSerializer(serializers.ModelSerializer):
"""Serailizer for the Locations Model.
This includes the related Name via the belong_to_name field. The
belong_to_name field uses the NameSerializer to nest the related
Name model.
"""
belong_to_name = NameSerializer()
class Meta:
model = models.Location
fields = '__all__'
class NameStatisticsMonthSerializer(serializers.Serializer):
"""Serializer for the NameStatisticsMonth object."""
total = serializers.IntegerField()
total_to_date = serializers.IntegerField()
month = serializers.DateTimeField()
class NameStatisticsTypeSerializer(serializers.Serializer):
"""Serializer for the NameStatisticsType object.
This serializer utilizes the NameStatisticsTypeMonth to serialize
the NameStatisticsMonth instances that the object instance contains.
"""
running_total = serializers.IntegerField()
stats = NameStatisticsMonthSerializer(many=True)
class NameStatisticsSerializer(serializers.Serializer):
"""Serializer for the NameStatistics object.
This serializer utilizes the NameStatisticsTypeSerializer to
serialize the NameStatisticsType instances that the object instance
contains.
"""
created = NameStatisticsTypeSerializer()
modified = NameStatisticsTypeSerializer()
name_type_totals = serializers.DictField()
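
# A minimal usage sketch (illustrative only): serializing a single Name
# instance. It assumes a Name object and an HTTP request are available in
# scope, since NameSerializer's HyperlinkedIdentityField needs the request
# to build absolute URLs.
#
#     name = models.Name.objects.first()
#     serializer = NameSerializer(name, context={'request': request})
#     data = serializer.data  # plain dict, ready for JSON rendering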
| bsd-3-clause |
ychen820/microblog | y/google-cloud-sdk/platform/gsutil/third_party/boto/boto/ec2/autoscale/policy.py | 13 | 6223 | # Copyright (c) 2009-2010 Reza Lotun http://reza.lotun.name/
# Copyright (c) 2011 Jann Kleen
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from boto.resultset import ResultSet
from boto.ec2.elb.listelement import ListElement
class Alarm(object):
def __init__(self, connection=None):
self.connection = connection
self.name = None
self.alarm_arn = None
def __repr__(self):
return 'Alarm:%s' % self.name
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'AlarmName':
self.name = value
elif name == 'AlarmARN':
self.alarm_arn = value
else:
setattr(self, name, value)
class AdjustmentType(object):
def __init__(self, connection=None):
self.connection = connection
self.adjustment_type = None
def __repr__(self):
return 'AdjustmentType:%s' % self.adjustment_type
def startElement(self, name, attrs, connection):
return
def endElement(self, name, value, connection):
if name == 'AdjustmentType':
self.adjustment_type = value
return
class MetricCollectionTypes(object):
class BaseType(object):
arg = ''
def __init__(self, connection):
self.connection = connection
self.val = None
def __repr__(self):
return '%s:%s' % (self.arg, self.val)
def startElement(self, name, attrs, connection):
return
def endElement(self, name, value, connection):
if name == self.arg:
self.val = value
class Metric(BaseType):
arg = 'Metric'
class Granularity(BaseType):
arg = 'Granularity'
def __init__(self, connection=None):
self.connection = connection
self.metrics = []
self.granularities = []
def __repr__(self):
return 'MetricCollectionTypes:<%s, %s>' % (self.metrics, self.granularities)
def startElement(self, name, attrs, connection):
if name == 'Granularities':
self.granularities = ResultSet([('member', self.Granularity)])
return self.granularities
elif name == 'Metrics':
self.metrics = ResultSet([('member', self.Metric)])
return self.metrics
def endElement(self, name, value, connection):
return
class ScalingPolicy(object):
def __init__(self, connection=None, **kwargs):
"""
Scaling Policy
:type name: str
:param name: Name of scaling policy.
:type adjustment_type: str
:param adjustment_type: Specifies the type of adjustment. Valid values are `ChangeInCapacity`, `ExactCapacity` and `PercentChangeInCapacity`.
:type as_name: str or int
:param as_name: Name or ARN of the Auto Scaling Group.
:type scaling_adjustment: int
:param scaling_adjustment: Value of adjustment (type specified in `adjustment_type`).
:type min_adjustment_step: int
:param min_adjustment_step: Value of min adjustment step required to
            apply the scaling policy (only makes sense when using `PercentChangeInCapacity` as adjustment_type).
:type cooldown: int
:param cooldown: Time (in seconds) before Alarm related Scaling Activities can start after the previous Scaling Activity ends.
"""
self.name = kwargs.get('name', None)
self.adjustment_type = kwargs.get('adjustment_type', None)
self.as_name = kwargs.get('as_name', None)
self.scaling_adjustment = kwargs.get('scaling_adjustment', None)
self.cooldown = kwargs.get('cooldown', None)
self.connection = connection
self.min_adjustment_step = kwargs.get('min_adjustment_step', None)
def __repr__(self):
return 'ScalingPolicy(%s group:%s adjustment:%s)' % (self.name,
self.as_name,
self.adjustment_type)
def startElement(self, name, attrs, connection):
if name == 'Alarms':
self.alarms = ResultSet([('member', Alarm)])
return self.alarms
def endElement(self, name, value, connection):
if name == 'PolicyName':
self.name = value
elif name == 'AutoScalingGroupName':
self.as_name = value
elif name == 'PolicyARN':
self.policy_arn = value
elif name == 'ScalingAdjustment':
self.scaling_adjustment = int(value)
elif name == 'Cooldown':
self.cooldown = int(value)
elif name == 'AdjustmentType':
self.adjustment_type = value
elif name == 'MinAdjustmentStep':
self.min_adjustment_step = int(value)
def delete(self):
return self.connection.delete_policy(self.name, self.as_name)
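
# A hypothetical sketch of creating such a policy through boto's autoscale
# connection (region, group and policy names are placeholders):
#
#     import boto.ec2.autoscale
#     conn = boto.ec2.autoscale.connect_to_region('us-east-1')
#     policy = ScalingPolicy(name='scale-up',
#                            adjustment_type='ChangeInCapacity',
#                            as_name='my-group',
#                            scaling_adjustment=2,
#                            cooldown=300)
#     conn.create_scaling_policy(policy)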
class TerminationPolicies(list):
def __init__(self, connection=None, **kwargs):
pass
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'member':
self.append(value)
| bsd-3-clause |
loneknightpy/spark | python/pyspark/statcounter.py | 130 | 5115 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This file is ported from spark/util/StatCounter.scala
import copy
import math
try:
from numpy import maximum, minimum, sqrt
except ImportError:
maximum = max
minimum = min
sqrt = math.sqrt
class StatCounter(object):
def __init__(self, values=None):
if values is None:
values = list()
self.n = 0 # Running count of our values
self.mu = 0.0 # Running mean of our values
self.m2 = 0.0 # Running variance numerator (sum of (x - mean)^2)
self.maxValue = float("-inf")
self.minValue = float("inf")
for v in values:
self.merge(v)
# Add a value into this StatCounter, updating the internal statistics.
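    # (This is Welford's online update: m2 accumulates sum((x - mean)^2)
    # incrementally, so the variance can be computed in a single pass.)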
def merge(self, value):
delta = value - self.mu
self.n += 1
self.mu += delta / self.n
self.m2 += delta * (value - self.mu)
self.maxValue = maximum(self.maxValue, value)
self.minValue = minimum(self.minValue, value)
return self
# Merge another StatCounter into this one, adding up the internal statistics.
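    # (This is the standard parallel/pairwise combination of counts, means and
    # m2 terms, so partial results from different partitions can be combined.)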
def mergeStats(self, other):
if not isinstance(other, StatCounter):
raise Exception("Can only merge Statcounters!")
if other is self: # reference equality holds
self.merge(copy.deepcopy(other)) # Avoid overwriting fields in a weird order
else:
if self.n == 0:
self.mu = other.mu
self.m2 = other.m2
self.n = other.n
self.maxValue = other.maxValue
self.minValue = other.minValue
elif other.n != 0:
delta = other.mu - self.mu
if other.n * 10 < self.n:
self.mu = self.mu + (delta * other.n) / (self.n + other.n)
elif self.n * 10 < other.n:
self.mu = other.mu - (delta * self.n) / (self.n + other.n)
else:
self.mu = (self.mu * self.n + other.mu * other.n) / (self.n + other.n)
self.maxValue = maximum(self.maxValue, other.maxValue)
self.minValue = minimum(self.minValue, other.minValue)
self.m2 += other.m2 + (delta * delta * self.n * other.n) / (self.n + other.n)
self.n += other.n
return self
# Clone this StatCounter
def copy(self):
return copy.deepcopy(self)
def count(self):
return int(self.n)
def mean(self):
return self.mu
def sum(self):
return self.n * self.mu
def min(self):
return self.minValue
def max(self):
return self.maxValue
# Return the variance of the values.
def variance(self):
if self.n == 0:
return float('nan')
else:
return self.m2 / self.n
#
# Return the sample variance, which corrects for bias in estimating the variance by dividing
# by N-1 instead of N.
#
def sampleVariance(self):
if self.n <= 1:
return float('nan')
else:
return self.m2 / (self.n - 1)
# Return the standard deviation of the values.
def stdev(self):
return sqrt(self.variance())
#
# Return the sample standard deviation of the values, which corrects for bias in estimating the
# variance by dividing by N-1 instead of N.
#
def sampleStdev(self):
return sqrt(self.sampleVariance())
def asDict(self, sample=False):
"""Returns the :class:`StatCounter` members as a ``dict``.
>>> sc.parallelize([1., 2., 3., 4.]).stats().asDict()
{'count': 4L,
'max': 4.0,
'mean': 2.5,
'min': 1.0,
'stdev': 1.2909944487358056,
'sum': 10.0,
'variance': 1.6666666666666667}
"""
return {
'count': self.count(),
'mean': self.mean(),
'sum': self.sum(),
'min': self.min(),
'max': self.max(),
'stdev': self.stdev() if sample else self.sampleStdev(),
'variance': self.variance() if sample else self.sampleVariance()
}
def __repr__(self):
return ("(count: %s, mean: %s, stdev: %s, max: %s, min: %s)" %
(self.count(), self.mean(), self.stdev(), self.max(), self.min()))
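
# A small local illustration of merging two partial StatCounters (values are
# arbitrary and no SparkContext is required for this sketch):
#
#     left = StatCounter([1.0, 2.0, 3.0])
#     right = StatCounter([4.0, 5.0])
#     combined = left.copy().mergeStats(right)
#     print(combined.mean(), combined.sampleStdev())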
| apache-2.0 |
fooelisa/ansible-modules-extras | database/misc/riak.py | 57 | 7967 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, James Martin <[email protected]>, Drew Kerrigan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
---
module: riak
short_description: This module handles some common Riak operations
description:
- This module can be used to join nodes to a cluster, check
the status of the cluster.
version_added: "1.2"
options:
command:
description:
- The command you would like to perform against the cluster.
required: false
default: null
aliases: []
choices: ['ping', 'kv_test', 'join', 'plan', 'commit']
config_dir:
description:
- The path to the riak configuration directory
required: false
default: /etc/riak
aliases: []
http_conn:
description:
- The ip address and port that is listening for Riak HTTP queries
required: false
default: 127.0.0.1:8098
aliases: []
target_node:
description:
- The target node for certain operations (join, ping)
required: false
default: [email protected]
aliases: []
wait_for_handoffs:
description:
- Number of seconds to wait for handoffs to complete.
required: false
default: null
aliases: []
type: 'int'
wait_for_ring:
description:
- Number of seconds to wait for all nodes to agree on the ring.
required: false
default: null
aliases: []
type: 'int'
wait_for_service:
description:
- Waits for a riak service to come online before continuing.
required: false
default: None
aliases: []
choices: ['kv']
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: 'yes'
choices: ['yes', 'no']
version_added: 1.5.1
'''
EXAMPLES = '''
# Join's a Riak node to another node
- riak: command=join [email protected]
# Wait for handoffs to finish. Use with async and poll.
- riak: wait_for_handoffs=yes
# Wait for riak_kv service to startup
- riak: wait_for_service=kv
'''
import urllib2
import time
import socket
import sys
try:
import json
except ImportError:
import simplejson as json
def ring_check(module, riak_admin_bin):
cmd = '%s ringready' % riak_admin_bin
rc, out, err = module.run_command(cmd)
if rc == 0 and 'TRUE All nodes agree on the ring' in out:
return True
else:
return False
def main():
module = AnsibleModule(
argument_spec=dict(
command=dict(required=False, default=None, choices=[
'ping', 'kv_test', 'join', 'plan', 'commit']),
config_dir=dict(default='/etc/riak'),
http_conn=dict(required=False, default='127.0.0.1:8098'),
target_node=dict(default='[email protected]', required=False),
wait_for_handoffs=dict(default=False, type='int'),
wait_for_ring=dict(default=False, type='int'),
wait_for_service=dict(
required=False, default=None, choices=['kv']),
validate_certs = dict(default='yes', type='bool'))
)
command = module.params.get('command')
config_dir = module.params.get('config_dir')
http_conn = module.params.get('http_conn')
target_node = module.params.get('target_node')
wait_for_handoffs = module.params.get('wait_for_handoffs')
wait_for_ring = module.params.get('wait_for_ring')
wait_for_service = module.params.get('wait_for_service')
validate_certs = module.params.get('validate_certs')
#make sure riak commands are on the path
riak_bin = module.get_bin_path('riak')
riak_admin_bin = module.get_bin_path('riak-admin')
timeout = time.time() + 120
while True:
if time.time() > timeout:
module.fail_json(msg='Timeout, could not fetch Riak stats.')
(response, info) = fetch_url(module, 'http://%s/stats' % (http_conn), force=True, timeout=5)
if info['status'] == 200:
stats_raw = response.read()
break
time.sleep(5)
    # Here we attempt to parse the stats JSON we just fetched.
try:
stats = json.loads(stats_raw)
except:
module.fail_json(msg='Could not parse Riak stats.')
node_name = stats['nodename']
nodes = stats['ring_members']
ring_size = stats['ring_creation_size']
rc, out, err = module.run_command([riak_bin, 'version'] )
version = out.strip()
result = dict(node_name=node_name,
nodes=nodes,
ring_size=ring_size,
version=version)
if command == 'ping':
cmd = '%s ping %s' % ( riak_bin, target_node )
rc, out, err = module.run_command(cmd)
if rc == 0:
result['ping'] = out
else:
module.fail_json(msg=out)
elif command == 'kv_test':
cmd = '%s test' % riak_admin_bin
rc, out, err = module.run_command(cmd)
if rc == 0:
result['kv_test'] = out
else:
module.fail_json(msg=out)
elif command == 'join':
if nodes.count(node_name) == 1 and len(nodes) > 1:
result['join'] = 'Node is already in cluster or staged to be in cluster.'
else:
cmd = '%s cluster join %s' % (riak_admin_bin, target_node)
rc, out, err = module.run_command(cmd)
if rc == 0:
result['join'] = out
result['changed'] = True
else:
module.fail_json(msg=out)
elif command == 'plan':
cmd = '%s cluster plan' % riak_admin_bin
rc, out, err = module.run_command(cmd)
if rc == 0:
result['plan'] = out
if 'Staged Changes' in out:
result['changed'] = True
else:
module.fail_json(msg=out)
elif command == 'commit':
cmd = '%s cluster commit' % riak_admin_bin
rc, out, err = module.run_command(cmd)
if rc == 0:
result['commit'] = out
result['changed'] = True
else:
module.fail_json(msg=out)
    # this could take a while; it is recommended to run this in async mode
if wait_for_handoffs:
timeout = time.time() + wait_for_handoffs
while True:
cmd = '%s transfers' % riak_admin_bin
rc, out, err = module.run_command(cmd)
if 'No transfers active' in out:
result['handoffs'] = 'No transfers active.'
break
time.sleep(10)
if time.time() > timeout:
module.fail_json(msg='Timeout waiting for handoffs.')
if wait_for_service:
cmd = [riak_admin_bin, 'wait_for_service', 'riak_%s' % wait_for_service, node_name ]
rc, out, err = module.run_command(cmd)
result['service'] = out
if wait_for_ring:
timeout = time.time() + wait_for_ring
while True:
if ring_check(module, riak_admin_bin):
break
time.sleep(10)
if time.time() > timeout:
module.fail_json(msg='Timeout waiting for nodes to agree on ring.')
result['ring_ready'] = ring_check(module, riak_admin_bin)
module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
main()
| gpl-3.0 |
ecugol/django-geoip | django_geoip/vendor/progressbar/progressbar.py | 3 | 9159 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# progressbar - Text progress bar library for Python.
# Copyright (c) 2005 Nilton Volpato
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""Main ProgressBar class."""
from __future__ import division
import math
import os
import signal
import sys
import time
from . import widgets
try:
from fcntl import ioctl
from array import array
import termios
except ImportError:
pass
class UnknownLength: pass
class ProgressBar(object):
"""The ProgressBar class which updates and prints the bar.
A common way of using it is like:
>>> pbar = ProgressBar().start()
>>> for i in range(100):
... # do something
... pbar.update(i+1)
...
>>> pbar.finish()
You can also use a ProgressBar as an iterator:
>>> progress = ProgressBar()
>>> for i in progress(some_iterable):
... # do something
...
Since the progress bar is incredibly customizable you can specify
different widgets of any type in any order. You can even write your own
widgets! However, since there are already a good number of widgets you
should probably play around with them before moving on to create your own
widgets.
The term_width parameter represents the current terminal width. If the
parameter is set to an integer then the progress bar will use that,
otherwise it will attempt to determine the terminal width falling back to
80 columns if the width cannot be determined.
When implementing a widget's update method you are passed a reference to
the current progress bar. As a result, you have access to the
ProgressBar's methods and attributes. Although there is nothing preventing
you from changing the ProgressBar you should treat it as read only.
Useful methods and attributes include (Public API):
- currval: current progress (0 <= currval <= maxval)
- maxval: maximum (and final) value
- finished: True if the bar has finished (reached 100%)
- start_time: the time when start() method of ProgressBar was called
- seconds_elapsed: seconds elapsed since start_time and last call to
update
- percentage(): progress in percent [0..100]
"""
__slots__ = ('currval', 'fd', 'finished', 'last_update_time',
'left_justify', 'maxval', 'next_update', 'num_intervals',
'poll', 'seconds_elapsed', 'signal_set', 'start_time',
'term_width', 'update_interval', 'widgets', '_time_sensitive',
'__iterable')
_DEFAULT_MAXVAL = 100
_DEFAULT_TERMSIZE = 80
_DEFAULT_WIDGETS = [widgets.Percentage(), ' ', widgets.Bar()]
def __init__(self, maxval=None, widgets=None, term_width=None, poll=1,
left_justify=True, fd=sys.stderr):
"""Initializes a progress bar with sane defaults."""
# Don't share a reference with any other progress bars
if widgets is None:
widgets = list(self._DEFAULT_WIDGETS)
self.maxval = maxval
self.widgets = widgets
self.fd = fd
self.left_justify = left_justify
self.signal_set = False
if term_width is not None:
self.term_width = term_width
else:
try:
self._handle_resize()
signal.signal(signal.SIGWINCH, self._handle_resize)
self.signal_set = True
except (SystemExit, KeyboardInterrupt): raise
except:
self.term_width = self._env_size()
self.__iterable = None
self._update_widgets()
self.currval = 0
self.finished = False
self.last_update_time = None
self.poll = poll
self.seconds_elapsed = 0
self.start_time = None
self.update_interval = 1
def __call__(self, iterable):
"""Use a ProgressBar to iterate through an iterable."""
try:
self.maxval = len(iterable)
except:
if self.maxval is None:
self.maxval = UnknownLength
self.__iterable = iter(iterable)
return self
def __iter__(self):
return self
def __next__(self):
try:
value = next(self.__iterable)
if self.start_time is None: self.start()
else: self.update(self.currval + 1)
return value
except StopIteration:
self.finish()
raise
# Create an alias so that Python 2.x won't complain about not being
# an iterator.
next = __next__
def _env_size(self):
"""Tries to find the term_width from the environment."""
return int(os.environ.get('COLUMNS', self._DEFAULT_TERMSIZE)) - 1
def _handle_resize(self, signum=None, frame=None):
"""Tries to catch resize signals sent from the terminal."""
h, w = array('h', ioctl(self.fd, termios.TIOCGWINSZ, '\0' * 8))[:2]
self.term_width = w
def percentage(self):
"""Returns the progress as a percentage."""
return self.currval * 100.0 / self.maxval
percent = property(percentage)
def _format_widgets(self):
result = []
expanding = []
width = self.term_width
for index, widget in enumerate(self.widgets):
if isinstance(widget, widgets.WidgetHFill):
result.append(widget)
expanding.insert(0, index)
else:
widget = widgets.format_updatable(widget, self)
result.append(widget)
width -= len(widget)
count = len(expanding)
while count:
portion = max(int(math.ceil(width * 1. / count)), 0)
index = expanding.pop()
count -= 1
widget = result[index].update(self, portion)
width -= len(widget)
result[index] = widget
return result
def _format_line(self):
"""Joins the widgets and justifies the line."""
widgets = ''.join(self._format_widgets())
if self.left_justify: return widgets.ljust(self.term_width)
else: return widgets.rjust(self.term_width)
def _need_update(self):
"""Returns whether the ProgressBar should redraw the line."""
if self.currval >= self.next_update or self.finished: return True
delta = time.time() - self.last_update_time
return self._time_sensitive and delta > self.poll
def _update_widgets(self):
"""Checks all widgets for the time sensitive bit."""
self._time_sensitive = any(getattr(w, 'TIME_SENSITIVE', False)
for w in self.widgets)
def update(self, value=None):
"""Updates the ProgressBar to a new value."""
if value is not None and value is not UnknownLength:
if (self.maxval is not UnknownLength
and not 0 <= value <= self.maxval):
raise ValueError('Value out of range')
self.currval = value
if not self._need_update(): return
if self.start_time is None:
raise RuntimeError('You must call "start" before calling "update"')
now = time.time()
self.seconds_elapsed = now - self.start_time
self.next_update = self.currval + self.update_interval
self.fd.write(self._format_line() + '\r')
self.last_update_time = now
def start(self):
"""Starts measuring time, and prints the bar at 0%.
It returns self so you can use it like this:
>>> pbar = ProgressBar().start()
>>> for i in range(100):
... # do something
... pbar.update(i+1)
...
>>> pbar.finish()
"""
if self.maxval is None:
self.maxval = self._DEFAULT_MAXVAL
self.num_intervals = max(100, self.term_width)
self.next_update = 0
if self.maxval is not UnknownLength:
if self.maxval < 0: raise ValueError('Value out of range')
self.update_interval = self.maxval / self.num_intervals
self.start_time = self.last_update_time = time.time()
self.update(0)
return self
def finish(self):
"""Puts the ProgressBar bar in the finished state."""
self.finished = True
self.update(self.maxval)
self.fd.write('\n')
if self.signal_set:
signal.signal(signal.SIGWINCH, signal.SIG_DFL)
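# ---------------------------------------------------------------------------
# Illustrative sketch only (not part of the original library).  The class
# docstring above notes that custom widgets can be written: a minimal widget
# just needs an ``update(pbar)`` method that returns a string.  This assumes
# ``widgets.format_updatable`` falls back to calling ``update`` on non-string
# widgets, as the bundled widgets suggest.
class ExampleCountWidget(object):
    """Shows the raw counter next to the bar, e.g. '42/100'."""
    def update(self, pbar):
        return '%d/%s' % (pbar.currval, pbar.maxval)
# Hypothetical usage:
#   pbar = ProgressBar(widgets=[ExampleCountWidget(), ' ', widgets.Bar()],
#                      maxval=100)
#   for i in pbar(range(100)):
#       pass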
| mit |
bluemini/kuma | vendor/packages/setuptools/command/bdist_egg.py | 306 | 17184 | """setuptools.command.bdist_egg
Build .egg distributions"""
from distutils.errors import DistutilsSetupError
from distutils.dir_util import remove_tree, mkpath
from distutils import log
from types import CodeType
import sys
import os
import marshal
import textwrap
from pkg_resources import get_build_platform, Distribution, ensure_directory
from pkg_resources import EntryPoint
from setuptools.compat import basestring
from setuptools.extension import Library
from setuptools import Command
try:
# Python 2.7 or >=3.2
from sysconfig import get_path, get_python_version
def _get_purelib():
return get_path("purelib")
except ImportError:
from distutils.sysconfig import get_python_lib, get_python_version
def _get_purelib():
return get_python_lib(False)
def strip_module(filename):
if '.' in filename:
filename = os.path.splitext(filename)[0]
if filename.endswith('module'):
filename = filename[:-6]
return filename
def write_stub(resource, pyfile):
_stub_template = textwrap.dedent("""
def __bootstrap__():
global __bootstrap__, __loader__, __file__
import sys, pkg_resources, imp
__file__ = pkg_resources.resource_filename(__name__, %r)
__loader__ = None; del __bootstrap__, __loader__
imp.load_dynamic(__name__,__file__)
__bootstrap__()
""").lstrip()
with open(pyfile, 'w') as f:
f.write(_stub_template % resource)
class bdist_egg(Command):
description = "create an \"egg\" distribution"
user_options = [
('bdist-dir=', 'b',
"temporary directory for creating the distribution"),
('plat-name=', 'p', "platform name to embed in generated filenames "
"(default: %s)" % get_build_platform()),
('exclude-source-files', None,
"remove all .py files from the generated egg"),
('keep-temp', 'k',
"keep the pseudo-installation tree around after " +
"creating the distribution archive"),
('dist-dir=', 'd',
"directory to put final built distributions in"),
('skip-build', None,
"skip rebuilding everything (for testing/debugging)"),
]
boolean_options = [
'keep-temp', 'skip-build', 'exclude-source-files'
]
def initialize_options(self):
self.bdist_dir = None
self.plat_name = None
self.keep_temp = 0
self.dist_dir = None
self.skip_build = 0
self.egg_output = None
self.exclude_source_files = None
def finalize_options(self):
ei_cmd = self.ei_cmd = self.get_finalized_command("egg_info")
self.egg_info = ei_cmd.egg_info
if self.bdist_dir is None:
bdist_base = self.get_finalized_command('bdist').bdist_base
self.bdist_dir = os.path.join(bdist_base, 'egg')
if self.plat_name is None:
self.plat_name = get_build_platform()
self.set_undefined_options('bdist', ('dist_dir', 'dist_dir'))
if self.egg_output is None:
# Compute filename of the output egg
basename = Distribution(
None, None, ei_cmd.egg_name, ei_cmd.egg_version,
get_python_version(),
self.distribution.has_ext_modules() and self.plat_name
).egg_name()
self.egg_output = os.path.join(self.dist_dir, basename + '.egg')
def do_install_data(self):
# Hack for packages that install data to install's --install-lib
self.get_finalized_command('install').install_lib = self.bdist_dir
site_packages = os.path.normcase(os.path.realpath(_get_purelib()))
old, self.distribution.data_files = self.distribution.data_files, []
for item in old:
if isinstance(item, tuple) and len(item) == 2:
if os.path.isabs(item[0]):
realpath = os.path.realpath(item[0])
normalized = os.path.normcase(realpath)
if normalized == site_packages or normalized.startswith(
site_packages + os.sep
):
item = realpath[len(site_packages) + 1:], item[1]
# XXX else: raise ???
self.distribution.data_files.append(item)
try:
log.info("installing package data to %s" % self.bdist_dir)
self.call_command('install_data', force=0, root=None)
finally:
self.distribution.data_files = old
def get_outputs(self):
return [self.egg_output]
def call_command(self, cmdname, **kw):
"""Invoke reinitialized command `cmdname` with keyword args"""
for dirname in INSTALL_DIRECTORY_ATTRS:
kw.setdefault(dirname, self.bdist_dir)
kw.setdefault('skip_build', self.skip_build)
kw.setdefault('dry_run', self.dry_run)
cmd = self.reinitialize_command(cmdname, **kw)
self.run_command(cmdname)
return cmd
def run(self):
# Generate metadata first
self.run_command("egg_info")
# We run install_lib before install_data, because some data hacks
# pull their data path from the install_lib command.
log.info("installing library code to %s" % self.bdist_dir)
instcmd = self.get_finalized_command('install')
old_root = instcmd.root
instcmd.root = None
if self.distribution.has_c_libraries() and not self.skip_build:
self.run_command('build_clib')
cmd = self.call_command('install_lib', warn_dir=0)
instcmd.root = old_root
all_outputs, ext_outputs = self.get_ext_outputs()
self.stubs = []
to_compile = []
for (p, ext_name) in enumerate(ext_outputs):
filename, ext = os.path.splitext(ext_name)
pyfile = os.path.join(self.bdist_dir, strip_module(filename) +
'.py')
self.stubs.append(pyfile)
log.info("creating stub loader for %s" % ext_name)
if not self.dry_run:
write_stub(os.path.basename(ext_name), pyfile)
to_compile.append(pyfile)
ext_outputs[p] = ext_name.replace(os.sep, '/')
if to_compile:
cmd.byte_compile(to_compile)
if self.distribution.data_files:
self.do_install_data()
# Make the EGG-INFO directory
archive_root = self.bdist_dir
egg_info = os.path.join(archive_root, 'EGG-INFO')
self.mkpath(egg_info)
if self.distribution.scripts:
script_dir = os.path.join(egg_info, 'scripts')
log.info("installing scripts to %s" % script_dir)
self.call_command('install_scripts', install_dir=script_dir,
no_ep=1)
self.copy_metadata_to(egg_info)
native_libs = os.path.join(egg_info, "native_libs.txt")
if all_outputs:
log.info("writing %s" % native_libs)
if not self.dry_run:
ensure_directory(native_libs)
libs_file = open(native_libs, 'wt')
libs_file.write('\n'.join(all_outputs))
libs_file.write('\n')
libs_file.close()
elif os.path.isfile(native_libs):
log.info("removing %s" % native_libs)
if not self.dry_run:
os.unlink(native_libs)
write_safety_flag(
os.path.join(archive_root, 'EGG-INFO'), self.zip_safe()
)
if os.path.exists(os.path.join(self.egg_info, 'depends.txt')):
log.warn(
"WARNING: 'depends.txt' will not be used by setuptools 0.6!\n"
"Use the install_requires/extras_require setup() args instead."
)
if self.exclude_source_files:
self.zap_pyfiles()
# Make the archive
make_zipfile(self.egg_output, archive_root, verbose=self.verbose,
dry_run=self.dry_run, mode=self.gen_header())
if not self.keep_temp:
remove_tree(self.bdist_dir, dry_run=self.dry_run)
# Add to 'Distribution.dist_files' so that the "upload" command works
getattr(self.distribution, 'dist_files', []).append(
('bdist_egg', get_python_version(), self.egg_output))
def zap_pyfiles(self):
log.info("Removing .py files from temporary directory")
for base, dirs, files in walk_egg(self.bdist_dir):
for name in files:
if name.endswith('.py'):
path = os.path.join(base, name)
log.debug("Deleting %s", path)
os.unlink(path)
def zip_safe(self):
safe = getattr(self.distribution, 'zip_safe', None)
if safe is not None:
return safe
log.warn("zip_safe flag not set; analyzing archive contents...")
return analyze_egg(self.bdist_dir, self.stubs)
def gen_header(self):
epm = EntryPoint.parse_map(self.distribution.entry_points or '')
ep = epm.get('setuptools.installation', {}).get('eggsecutable')
if ep is None:
return 'w' # not an eggsecutable, do it the usual way.
if not ep.attrs or ep.extras:
raise DistutilsSetupError(
"eggsecutable entry point (%r) cannot have 'extras' "
"or refer to a module" % (ep,)
)
pyver = sys.version[:3]
pkg = ep.module_name
full = '.'.join(ep.attrs)
base = ep.attrs[0]
basename = os.path.basename(self.egg_output)
header = (
"#!/bin/sh\n"
'if [ `basename $0` = "%(basename)s" ]\n'
'then exec python%(pyver)s -c "'
"import sys, os; sys.path.insert(0, os.path.abspath('$0')); "
"from %(pkg)s import %(base)s; sys.exit(%(full)s())"
'" "$@"\n'
'else\n'
' echo $0 is not the correct name for this egg file.\n'
' echo Please rename it back to %(basename)s and try again.\n'
' exec false\n'
'fi\n'
) % locals()
if not self.dry_run:
mkpath(os.path.dirname(self.egg_output), dry_run=self.dry_run)
f = open(self.egg_output, 'w')
f.write(header)
f.close()
return 'a'
def copy_metadata_to(self, target_dir):
"Copy metadata (egg info) to the target_dir"
# normalize the path (so that a forward-slash in egg_info will
# match using startswith below)
norm_egg_info = os.path.normpath(self.egg_info)
prefix = os.path.join(norm_egg_info, '')
for path in self.ei_cmd.filelist.files:
if path.startswith(prefix):
target = os.path.join(target_dir, path[len(prefix):])
ensure_directory(target)
self.copy_file(path, target)
def get_ext_outputs(self):
"""Get a list of relative paths to C extensions in the output distro"""
all_outputs = []
ext_outputs = []
paths = {self.bdist_dir: ''}
for base, dirs, files in os.walk(self.bdist_dir):
for filename in files:
if os.path.splitext(filename)[1].lower() in NATIVE_EXTENSIONS:
all_outputs.append(paths[base] + filename)
for filename in dirs:
paths[os.path.join(base, filename)] = (paths[base] +
filename + '/')
if self.distribution.has_ext_modules():
build_cmd = self.get_finalized_command('build_ext')
for ext in build_cmd.extensions:
if isinstance(ext, Library):
continue
fullname = build_cmd.get_ext_fullname(ext.name)
filename = build_cmd.get_ext_filename(fullname)
if not os.path.basename(filename).startswith('dl-'):
if os.path.exists(os.path.join(self.bdist_dir, filename)):
ext_outputs.append(filename)
return all_outputs, ext_outputs
NATIVE_EXTENSIONS = dict.fromkeys('.dll .so .dylib .pyd'.split())
def walk_egg(egg_dir):
"""Walk an unpacked egg's contents, skipping the metadata directory"""
walker = os.walk(egg_dir)
base, dirs, files = next(walker)
if 'EGG-INFO' in dirs:
dirs.remove('EGG-INFO')
yield base, dirs, files
for bdf in walker:
yield bdf
def analyze_egg(egg_dir, stubs):
# check for existing flag in EGG-INFO
for flag, fn in safety_flags.items():
if os.path.exists(os.path.join(egg_dir, 'EGG-INFO', fn)):
return flag
if not can_scan():
return False
safe = True
for base, dirs, files in walk_egg(egg_dir):
for name in files:
if name.endswith('.py') or name.endswith('.pyw'):
continue
elif name.endswith('.pyc') or name.endswith('.pyo'):
# always scan, even if we already know we're not safe
safe = scan_module(egg_dir, base, name, stubs) and safe
return safe
def write_safety_flag(egg_dir, safe):
# Write or remove zip safety flag file(s)
for flag, fn in safety_flags.items():
fn = os.path.join(egg_dir, fn)
if os.path.exists(fn):
if safe is None or bool(safe) != flag:
os.unlink(fn)
elif safe is not None and bool(safe) == flag:
f = open(fn, 'wt')
f.write('\n')
f.close()
safety_flags = {
True: 'zip-safe',
False: 'not-zip-safe',
}
def scan_module(egg_dir, base, name, stubs):
"""Check whether module possibly uses unsafe-for-zipfile stuff"""
filename = os.path.join(base, name)
if filename[:-1] in stubs:
return True # Extension module
pkg = base[len(egg_dir) + 1:].replace(os.sep, '.')
module = pkg + (pkg and '.' or '') + os.path.splitext(name)[0]
if sys.version_info < (3, 3):
skip = 8 # skip magic & date
else:
skip = 12 # skip magic & date & file size
f = open(filename, 'rb')
f.read(skip)
code = marshal.load(f)
f.close()
safe = True
symbols = dict.fromkeys(iter_symbols(code))
for bad in ['__file__', '__path__']:
if bad in symbols:
log.warn("%s: module references %s", module, bad)
safe = False
if 'inspect' in symbols:
for bad in [
            'getsource', 'getabsfile', 'getsourcefile', 'getfile',
'getsourcelines', 'findsource', 'getcomments', 'getframeinfo',
'getinnerframes', 'getouterframes', 'stack', 'trace'
]:
if bad in symbols:
log.warn("%s: module MAY be using inspect.%s", module, bad)
safe = False
return safe
def iter_symbols(code):
"""Yield names and strings used by `code` and its nested code objects"""
for name in code.co_names:
yield name
for const in code.co_consts:
if isinstance(const, basestring):
yield const
elif isinstance(const, CodeType):
for name in iter_symbols(const):
yield name
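# Illustrative sketch (not part of the original module): the kind of names
# scan_module() inspects, shown on a snippet compiled on the fly.
def _iter_symbols_example():
    code = compile("import inspect\nsrc = inspect.getsource(object)", "<demo>", "exec")
    # Returns ['getsource', 'inspect', 'object', 'src'] for the snippet above.
    return sorted(set(iter_symbols(code)))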
def can_scan():
if not sys.platform.startswith('java') and sys.platform != 'cli':
# CPython, PyPy, etc.
return True
log.warn("Unable to analyze compiled code on this platform.")
log.warn("Please ask the author to include a 'zip_safe'"
" setting (either True or False) in the package's setup.py")
# Attribute names of options for commands that might need to be convinced to
# install to the egg build directory
INSTALL_DIRECTORY_ATTRS = [
'install_lib', 'install_dir', 'install_data', 'install_base'
]
def make_zipfile(zip_filename, base_dir, verbose=0, dry_run=0, compress=True,
mode='w'):
"""Create a zip file from all the files under 'base_dir'. The output
zip file will be named 'base_dir' + ".zip". Uses either the "zipfile"
Python module (if available) or the InfoZIP "zip" utility (if installed
and found on the default search path). If neither tool is available,
raises DistutilsExecError. Returns the name of the output zip file.
"""
import zipfile
mkpath(os.path.dirname(zip_filename), dry_run=dry_run)
log.info("creating '%s' and adding '%s' to it", zip_filename, base_dir)
def visit(z, dirname, names):
for name in names:
path = os.path.normpath(os.path.join(dirname, name))
if os.path.isfile(path):
p = path[len(base_dir) + 1:]
if not dry_run:
z.write(path, p)
log.debug("adding '%s'" % p)
compression = zipfile.ZIP_DEFLATED if compress else zipfile.ZIP_STORED
if not dry_run:
z = zipfile.ZipFile(zip_filename, mode, compression=compression)
for dirname, dirs, files in os.walk(base_dir):
visit(z, dirname, files)
z.close()
else:
for dirname, dirs, files in os.walk(base_dir):
visit(None, dirname, files)
return zip_filename
| mpl-2.0 |
mou4e/zirconium | chrome/common/extensions/docs/server2/samples_data_source.py | 16 | 1167 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import traceback
from data_source import DataSource
from extensions_paths import EXAMPLES
from future import All, Future
from jsc_view import CreateSamplesView
from platform_util import GetPlatforms
class SamplesDataSource(DataSource):
'''Constructs a list of samples and their respective files and api calls.
'''
def __init__(self, server_instance, request):
self._platform_bundle = server_instance.platform_bundle
self._request = request
def _GetImpl(self, platform):
cache = self._platform_bundle.GetSamplesModel(platform).GetCache()
create_view = lambda samp_list: CreateSamplesView(samp_list, self._request)
return cache.GetFromFileListing('' if platform == 'apps'
else EXAMPLES).Then(create_view)
def get(self, platform):
return self._GetImpl(platform).Get()
def GetRefreshPaths(self):
return [platform for platform in GetPlatforms()]
def Refresh(self, path):
return self._GetImpl(path)
| bsd-3-clause |
neubot/neubot | neubot/system_posix.py | 2 | 3747 | # neubot/system_posix.py
#
# Copyright (c) 2010-2011
# Nexa Center for Internet & Society, Politecnico di Torino (DAUIN)
# and Simone Basso <[email protected]>
#
# This file is part of Neubot <http://www.neubot.org/>.
#
# Neubot is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Neubot is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Neubot. If not, see <http://www.gnu.org/licenses/>.
#
'''
Code for UNIX
'''
# NB: This code is currently being refactored.
#
# When we MUST exit better to use os._exit() rather than
# sys.exit() because the former cannot be catched while
# the latter can.
#
UNPRIV_USER = '_neubot'
import os
import syslog
from neubot import utils_hier
from neubot import utils_posix
from neubot import utils_rc
def __logger(severity, message):
''' Log @message at the given @severity using syslog '''
#
    # Implemented using syslog because SysLogHandler is
# difficult to use: you need to know the path to the
# system specific ``/dev/log``.
#
if severity == 'ERROR':
syslog.syslog(syslog.LOG_ERR, message)
elif severity == 'WARNING':
syslog.syslog(syslog.LOG_WARNING, message)
elif severity == 'DEBUG':
syslog.syslog(syslog.LOG_DEBUG, message)
else:
syslog.syslog(syslog.LOG_INFO, message)
def get_background_logger():
''' Return the background logger '''
syslog.openlog("neubot", syslog.LOG_PID, syslog.LOG_DAEMON)
return __logger
def _get_profile_dir():
''' The profile directory is always LOCALSTATEDIR '''
return utils_hier.LOCALSTATEDIR
def _want_rwx_dir(datadir):
'''
This function ensures that the unprivileged user is the
    owner of the directory that contains the Neubot database.
Otherwise sqlite3 fails to lock the database for writing
(it creates a lockfile for that).
Read more at http://www.neubot.org/node/14
'''
# Does the directory exist?
if not os.path.isdir(datadir):
os.mkdir(datadir, 493) # 0755 in base 10
# Change directory ownership
if os.getuid() == 0:
passwd = getpwnam()
os.chown(datadir, passwd.pw_uid, passwd.pw_gid)
def go_background():
''' Detach from the shell and run in background '''
utils_posix.daemonize(pidfile='/var/run/neubot.pid')
def getpwnam():
''' Wrapper for getpwnam '''
cnf = utils_rc.parse_safe('/etc/neubot/users')
unpriv_user = cnf.get('unpriv_user', UNPRIV_USER)
passwd = utils_posix.getpwnam(unpriv_user)
return passwd
def drop_privileges():
'''
Drop root privileges and run on behalf of the specified
    unprivileged user.
'''
passwd = getpwnam()
utils_posix.chuser(passwd)
def _want_rw_file(path):
'''
Ensure that the given file is readable and writable
by its owner. If running as root force ownership
to be of the unprivileged user.
'''
# Create file if non-existent
filep = open(path, "ab+")
filep.close()
# Enforce file ownership
if os.getuid() == 0:
passwd = getpwnam()
os.chown(path, passwd.pw_uid, passwd.pw_gid)
# Set permissions
os.chmod(path, 420) # 0644 in base 10
def has_enough_privs():
''' Returns true if this process has enough privileges '''
return os.getuid() == 0
| gpl-3.0 |
carlodri/moviepy | moviepy/video/io/VideoFileClip.py | 14 | 2711 | import os
from moviepy.video.VideoClip import VideoClip
from moviepy.audio.io.AudioFileClip import AudioFileClip
from moviepy.Clip import Clip
from moviepy.video.io.ffmpeg_reader import FFMPEG_VideoReader
class VideoFileClip(VideoClip):
"""
A video clip originating from a movie file. For instance: ::
        >>> clip = VideoFileClip("myHolidays.mp4")
        >>> clip2 = VideoFileClip("myMaskVideo.avi")
Parameters
------------
filename:
The name of the video file. It can have any extension supported
by ffmpeg: .ogv, .mp4, .mpeg, .avi, .mov etc.
has_mask:
Set this to 'True' if there is a mask included in the videofile.
Video files rarely contain masks, but some video codecs enable
      that. For instance, if you have a MoviePy VideoClip with a mask you
can save it to a videofile with a mask. (see also
``VideoClip.write_videofile`` for more details).
audio:
Set to `False` if the clip doesn't have any audio or if you do not
wish to read the audio.
Attributes
-----------
filename:
Name of the original video file.
fps:
Frames per second in the original file.
"""
def __init__(self, filename, has_mask=False,
audio=True, audio_buffersize = 200000,
audio_fps=44100, audio_nbytes=2, verbose=False):
VideoClip.__init__(self)
# Make a reader
pix_fmt= "rgba" if has_mask else "rgb24"
reader = FFMPEG_VideoReader(filename, pix_fmt=pix_fmt)
self.reader = reader
# Make some of the reader's attributes accessible from the clip
self.duration = self.reader.duration
self.end = self.reader.duration
self.fps = self.reader.fps
self.size = self.reader.size
if has_mask:
self.make_frame = lambda t: reader.get_frame(t)[:,:,:3]
mask_mf = lambda t: reader.get_frame(t)[:,:,3]/255.0
self.mask = (VideoClip(ismask = True, make_frame = mask_mf)
.set_duration(self.duration))
self.mask.fps = self.fps
else:
self.make_frame = lambda t: reader.get_frame(t)
# Make a reader for the audio, if any.
if audio and self.reader.infos['audio_found']:
self.audio = AudioFileClip(filename,
buffersize= audio_buffersize,
fps = audio_fps,
nbytes = audio_nbytes)
def __del__(self):
""" Close/delete the internal reader. """
del self.reader
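# Hypothetical usage sketch (not part of the original file; assumes a local
# "myHolidays.mp4" with an audio track):
#   clip = VideoFileClip("myHolidays.mp4")
#   frame = clip.get_frame(1.5)      # RGB frame at t=1.5s as a numpy array
#   print clip.fps, clip.duration, clip.audio.fps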
| mit |
mithron/opendatahack | web/main.py | 1 | 1805 | from datetime import datetime
import json
import os
from urlparse import urlparse
from pymongo.connection import Connection
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
from tornado.options import define, options
MONGO_URL = "" # found with $>heroku config
we_live = True
class Application(tornado.web.Application):
def __init__(self):
handlers = [
(r"/list/", MainHandler),
(r"/([0-9]+)/", SchoolHandler)
]
settings = dict(
autoescape=None,
)
tornado.web.Application.__init__(self, handlers, **settings)
if we_live:
self.con = Connection(MONGO_URL)
self.database = self.con[urlparse(MONGO_URL).path[1:]]
else:
self.con = Connection('localhost', 27017)
self.database = self.con["moscow"]
class BaseHandler(tornado.web.RequestHandler):
@property
def db(self):
return self.application.database
class SchoolHandler(BaseHandler):
def get(self, inn=None):
if inn:
suppliers = list(self.db["suppliers"].find({'inn': int(inn)}, fields={"_id": False}))
self.write(json.dumps(suppliers, ensure_ascii=False, encoding='utf8'))
else:
self.write("[]")
class MainHandler(BaseHandler):
def get(self):
schools = list(self.db["suppliers"].find(fields={"full_name": True, "inn": True, "_id": False}))
self.write(json.dumps(schools, ensure_ascii=False, encoding='utf8'))
def main():
tornado.options.parse_command_line()
http_server = tornado.httpserver.HTTPServer(Application())
http_server.listen(int(os.environ.get("PORT", 8888)))
tornado.ioloop.IOLoop.instance().start()
if __name__ == "__main__":
main() | mit |
jnerin/ansible | lib/ansible/modules/source_control/gitlab_project.py | 16 | 14955 | #!/usr/bin/python
# (c) 2015, Werner Dijkerman ([email protected])
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gitlab_project
short_description: Creates/updates/deletes Gitlab Projects
description:
- When the project does not exists in Gitlab, it will be created.
- When the project does exists and state=absent, the project will be deleted.
- When changes are made to the project, the project will be updated.
version_added: "2.1"
author: "Werner Dijkerman (@dj-wasabi)"
requirements:
- pyapi-gitlab python module
options:
server_url:
description:
- Url of Gitlab server, with protocol (http or https).
required: true
validate_certs:
description:
- When using https if SSL certificate needs to be verified.
required: false
default: true
aliases:
- verify_ssl
login_user:
description:
- Gitlab user name.
required: false
default: null
login_password:
description:
- Gitlab password for login_user
required: false
default: null
login_token:
description:
- Gitlab token for logging in.
required: false
default: null
group:
description:
- The name of the group of which this projects belongs to.
      - When not provided, the project will belong to the user configured via 'login_user' or 'login_token'
      - When provided with a username, the project will be created for this user. 'login_user' or 'login_token' needs admin rights.
required: false
default: null
name:
description:
- The name of the project
required: true
path:
description:
- The path of the project you want to create, this will be server_url/<group>/path
- If not supplied, name will be used.
required: false
default: null
description:
description:
      - A description for the project.
required: false
default: null
issues_enabled:
description:
- Whether you want to create issues or not.
- Possible values are true and false.
required: false
default: true
merge_requests_enabled:
description:
- If merge requests can be made or not.
- Possible values are true and false.
required: false
default: true
wiki_enabled:
description:
      - If a wiki for this project should be available or not.
- Possible values are true and false.
required: false
default: true
snippets_enabled:
description:
- If creating snippets should be available or not.
- Possible values are true and false.
required: false
default: true
public:
description:
      - If the project is publicly available or not.
      - Setting this to true is the same as setting visibility_level to 20.
- Possible values are true and false.
required: false
default: false
visibility_level:
description:
- Private. visibility_level is 0. Project access must be granted explicitly for each user.
- Internal. visibility_level is 10. The project can be cloned by any logged in user.
- Public. visibility_level is 20. The project can be cloned without any authentication.
- Possible values are 0, 10 and 20.
required: false
default: 0
import_url:
description:
- Git repository which will be imported into gitlab.
- Gitlab server needs read access to this git repository.
required: false
default: false
state:
description:
- create or delete project.
- Possible values are present and absent.
required: false
default: "present"
choices: ["present", "absent"]
'''
EXAMPLES = '''
- name: Delete Gitlab Project
gitlab_project:
server_url: http://gitlab.example.com
validate_certs: False
login_token: WnUzDsxjy8230-Dy_k
name: my_first_project
state: absent
delegate_to: localhost
- name: Create Gitlab Project in group Ansible
gitlab_project:
server_url: https://gitlab.example.com
validate_certs: True
login_user: dj-wasabi
login_password: MySecretPassword
name: my_first_project
group: ansible
issues_enabled: False
wiki_enabled: True
snippets_enabled: True
import_url: http://git.example.com/example/lab.git
state: present
delegate_to: localhost
'''
RETURN = '''# '''
try:
import gitlab
HAS_GITLAB_PACKAGE = True
except:
HAS_GITLAB_PACKAGE = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
class GitLabProject(object):
def __init__(self, module, git):
self._module = module
self._gitlab = git
def createOrUpdateProject(self, project_exists, group_name, import_url, arguments):
is_user = False
group_id = self.getGroupId(group_name)
if not group_id:
group_id = self.getUserId(group_name)
is_user = True
if project_exists:
# Edit project
return self.updateProject(group_name, arguments)
else:
# Create project
if self._module.check_mode:
self._module.exit_json(changed=True)
return self.createProject(is_user, group_id, import_url, arguments)
def createProject(self, is_user, user_id, import_url, arguments):
if is_user:
return self._gitlab.createprojectuser(user_id=user_id, import_url=import_url, **arguments)
else:
group_id = user_id
return self._gitlab.createproject(namespace_id=group_id, import_url=import_url, **arguments)
def deleteProject(self, group_name, project_name):
if self.existsGroup(group_name):
project_owner = group_name
else:
project_owner = self._gitlab.currentuser()['username']
search_results = self._gitlab.searchproject(search=project_name)
for result in search_results:
owner = result['namespace']['name']
if owner == project_owner:
return self._gitlab.deleteproject(result['id'])
def existsProject(self, group_name, project_name):
if self.existsGroup(group_name):
project_owner = group_name
else:
project_owner = self._gitlab.currentuser()['username']
search_results = self._gitlab.searchproject(search=project_name)
for result in search_results:
owner = result['namespace']['name']
if owner == project_owner:
return True
return False
def existsGroup(self, group_name):
if group_name is not None:
# Find the group, if group not exists we try for user
for group in self._gitlab.getall(self._gitlab.getgroups):
if group['name'] == group_name:
return True
user_name = group_name
user_data = self._gitlab.getusers(search=user_name)
for data in user_data:
if 'id' in user_data:
return True
return False
def getGroupId(self, group_name):
if group_name is not None:
# Find the group, if group not exists we try for user
for group in self._gitlab.getall(self._gitlab.getgroups):
if group['name'] == group_name:
return group['id']
def getProjectId(self, group_name, project_name):
if self.existsGroup(group_name):
project_owner = group_name
else:
project_owner = self._gitlab.currentuser()['username']
search_results = self._gitlab.searchproject(search=project_name)
for result in search_results:
owner = result['namespace']['name']
if owner == project_owner:
return result['id']
def getUserId(self, user_name):
user_data = self._gitlab.getusers(search=user_name)
for data in user_data:
if 'id' in data:
return data['id']
return self._gitlab.currentuser()['id']
def to_bool(self, value):
if value:
return 1
else:
return 0
def updateProject(self, group_name, arguments):
project_changed = False
project_name = arguments['name']
project_id = self.getProjectId(group_name, project_name)
project_data = self._gitlab.getproject(project_id=project_id)
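        # Compare what the server currently reports against each requested
        # argument; boolean fields may come back as True/False/None, so they
        # are normalised with to_bool() before comparing to the 0/1 values
        # built in main().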
for arg_key, arg_value in arguments.items():
project_data_value = project_data[arg_key]
if isinstance(project_data_value, bool) or project_data_value is None:
to_bool = self.to_bool(project_data_value)
if to_bool != arg_value:
project_changed = True
continue
else:
if project_data_value != arg_value:
project_changed = True
if project_changed:
if self._module.check_mode:
self._module.exit_json(changed=True)
return self._gitlab.editproject(project_id=project_id, **arguments)
else:
return False
def main():
module = AnsibleModule(
argument_spec=dict(
server_url=dict(required=True),
validate_certs=dict(required=False, default=True, type='bool', aliases=['verify_ssl']),
login_user=dict(required=False, no_log=True),
login_password=dict(required=False, no_log=True),
login_token=dict(required=False, no_log=True),
group=dict(required=False),
name=dict(required=True),
path=dict(required=False),
description=dict(required=False),
issues_enabled=dict(default=True, type='bool'),
merge_requests_enabled=dict(default=True, type='bool'),
wiki_enabled=dict(default=True, type='bool'),
snippets_enabled=dict(default=True, type='bool'),
public=dict(default=False, type='bool'),
visibility_level=dict(default="0", choices=["0", "10", "20"]),
import_url=dict(required=False),
state=dict(default="present", choices=["present", 'absent']),
),
supports_check_mode=True
)
if not HAS_GITLAB_PACKAGE:
module.fail_json(msg="Missing required gitlab module (check docs or install with: pip install pyapi-gitlab")
server_url = module.params['server_url']
verify_ssl = module.params['validate_certs']
login_user = module.params['login_user']
login_password = module.params['login_password']
login_token = module.params['login_token']
group_name = module.params['group']
project_name = module.params['name']
project_path = module.params['path']
description = module.params['description']
issues_enabled = module.params['issues_enabled']
merge_requests_enabled = module.params['merge_requests_enabled']
wiki_enabled = module.params['wiki_enabled']
snippets_enabled = module.params['snippets_enabled']
public = module.params['public']
visibility_level = module.params['visibility_level']
import_url = module.params['import_url']
state = module.params['state']
# We need both login_user and login_password or login_token, otherwise we fail.
if login_user is not None and login_password is not None:
use_credentials = True
elif login_token is not None:
use_credentials = False
else:
module.fail_json(msg="No login credentials are given. Use login_user with login_password, or login_token")
# Set project_path to project_name if it is empty.
if project_path is None:
project_path = project_name.replace(" ", "_")
# Gitlab API makes no difference between upper and lower cases, so we lower them.
project_name = project_name.lower()
project_path = project_path.lower()
if group_name is not None:
group_name = group_name.lower()
# Lets make an connection to the Gitlab server_url, with either login_user and login_password
# or with login_token
try:
if use_credentials:
git = gitlab.Gitlab(host=server_url, verify_ssl=verify_ssl)
git.login(user=login_user, password=login_password)
else:
git = gitlab.Gitlab(server_url, token=login_token, verify_ssl=verify_ssl)
except Exception as e:
module.fail_json(msg="Failed to connect to Gitlab server: %s " % to_native(e))
# Check if user is authorized or not before proceeding to any operations
# if not, exit from here
auth_msg = git.currentuser().get('message', None)
if auth_msg is not None and auth_msg == '401 Unauthorized':
module.fail_json(msg='User unauthorized',
details="User is not allowed to access Gitlab server "
"using login_token. Please check login_token")
# Validate if project exists and take action based on "state"
project = GitLabProject(module, git)
project_exists = project.existsProject(group_name, project_name)
# Creating the project dict
arguments = {"name": project_name,
"path": project_path,
"description": description,
"issues_enabled": project.to_bool(issues_enabled),
"merge_requests_enabled": project.to_bool(merge_requests_enabled),
"wiki_enabled": project.to_bool(wiki_enabled),
"snippets_enabled": project.to_bool(snippets_enabled),
"public": project.to_bool(public),
"visibility_level": int(visibility_level)}
if project_exists and state == "absent":
project.deleteProject(group_name, project_name)
module.exit_json(changed=True, result="Successfully deleted project %s" % project_name)
else:
if state == "absent":
module.exit_json(changed=False, result="Project deleted or does not exists")
else:
if project.createOrUpdateProject(project_exists, group_name, import_url, arguments):
module.exit_json(changed=True, result="Successfully created or updated the project %s" % project_name)
else:
module.exit_json(changed=False)
if __name__ == '__main__':
main()
| gpl-3.0 |
dexterx17/nodoSocket | clients/Python-2.7.6/Lib/bsddb/test/test_early_close.py | 72 | 7440 | """TestCases for checking that it does not segfault when a DBEnv object
is closed before its DB objects.
"""
import os, sys
import unittest
from test_all import db, test_support, verbose, get_new_environment_path, get_new_database_path
# We're going to get warnings in this module about trying to close the db when
# its env is already closed. Let's just ignore those.
try:
import warnings
except ImportError:
pass
else:
warnings.filterwarnings('ignore',
message='DB could not be closed in',
category=RuntimeWarning)
#----------------------------------------------------------------------
class DBEnvClosedEarlyCrash(unittest.TestCase):
def setUp(self):
self.homeDir = get_new_environment_path()
self.filename = "test"
def tearDown(self):
test_support.rmtree(self.homeDir)
def test01_close_dbenv_before_db(self):
dbenv = db.DBEnv()
dbenv.open(self.homeDir,
db.DB_INIT_CDB| db.DB_CREATE |db.DB_THREAD|db.DB_INIT_MPOOL,
0666)
d = db.DB(dbenv)
d2 = db.DB(dbenv)
d.open(self.filename, db.DB_BTREE, db.DB_CREATE | db.DB_THREAD, 0666)
self.assertRaises(db.DBNoSuchFileError, d2.open,
self.filename+"2", db.DB_BTREE, db.DB_THREAD, 0666)
d.put("test","this is a test")
self.assertEqual(d.get("test"), "this is a test", "put!=get")
dbenv.close() # This "close" should close the child db handle also
self.assertRaises(db.DBError, d.get, "test")
def test02_close_dbenv_before_dbcursor(self):
dbenv = db.DBEnv()
dbenv.open(self.homeDir,
db.DB_INIT_CDB| db.DB_CREATE |db.DB_THREAD|db.DB_INIT_MPOOL,
0666)
d = db.DB(dbenv)
d.open(self.filename, db.DB_BTREE, db.DB_CREATE | db.DB_THREAD, 0666)
d.put("test","this is a test")
d.put("test2","another test")
d.put("test3","another one")
self.assertEqual(d.get("test"), "this is a test", "put!=get")
c=d.cursor()
c.first()
c.next()
d.close() # This "close" should close the child db handle also
# db.close should close the child cursor
self.assertRaises(db.DBError,c.next)
d = db.DB(dbenv)
d.open(self.filename, db.DB_BTREE, db.DB_CREATE | db.DB_THREAD, 0666)
c=d.cursor()
c.first()
c.next()
dbenv.close()
# The "close" should close the child db handle also, with cursors
self.assertRaises(db.DBError, c.next)
def test03_close_db_before_dbcursor_without_env(self):
import os.path
path=os.path.join(self.homeDir,self.filename)
d = db.DB()
d.open(path, db.DB_BTREE, db.DB_CREATE | db.DB_THREAD, 0666)
d.put("test","this is a test")
d.put("test2","another test")
d.put("test3","another one")
self.assertEqual(d.get("test"), "this is a test", "put!=get")
c=d.cursor()
c.first()
c.next()
d.close()
# The "close" should close the child db handle also
self.assertRaises(db.DBError, c.next)
def test04_close_massive(self):
dbenv = db.DBEnv()
dbenv.open(self.homeDir,
db.DB_INIT_CDB| db.DB_CREATE |db.DB_THREAD|db.DB_INIT_MPOOL,
0666)
dbs=[db.DB(dbenv) for i in xrange(16)]
cursors=[]
for i in dbs :
i.open(self.filename, db.DB_BTREE, db.DB_CREATE | db.DB_THREAD, 0666)
dbs[10].put("test","this is a test")
dbs[10].put("test2","another test")
dbs[10].put("test3","another one")
self.assertEqual(dbs[4].get("test"), "this is a test", "put!=get")
for i in dbs :
cursors.extend([i.cursor() for j in xrange(32)])
for i in dbs[::3] :
i.close()
for i in cursors[::3] :
i.close()
# Check for missing exception in DB! (after DB close)
self.assertRaises(db.DBError, dbs[9].get, "test")
# Check for missing exception in DBCursor! (after DB close)
self.assertRaises(db.DBError, cursors[101].first)
cursors[80].first()
cursors[80].next()
dbenv.close() # This "close" should close the child db handle also
# Check for missing exception! (after DBEnv close)
self.assertRaises(db.DBError, cursors[80].next)
def test05_close_dbenv_delete_db_success(self):
dbenv = db.DBEnv()
dbenv.open(self.homeDir,
db.DB_INIT_CDB| db.DB_CREATE |db.DB_THREAD|db.DB_INIT_MPOOL,
0666)
d = db.DB(dbenv)
d.open(self.filename, db.DB_BTREE, db.DB_CREATE | db.DB_THREAD, 0666)
dbenv.close() # This "close" should close the child db handle also
del d
try:
import gc
except ImportError:
gc = None
if gc:
# force d.__del__ [DB_dealloc] to be called
gc.collect()
def test06_close_txn_before_dup_cursor(self) :
dbenv = db.DBEnv()
dbenv.open(self.homeDir,db.DB_INIT_TXN | db.DB_INIT_MPOOL |
db.DB_INIT_LOG | db.DB_CREATE)
d = db.DB(dbenv)
txn = dbenv.txn_begin()
d.open(self.filename, dbtype = db.DB_HASH, flags = db.DB_CREATE,
txn=txn)
d.put("XXX", "yyy", txn=txn)
txn.commit()
txn = dbenv.txn_begin()
c1 = d.cursor(txn)
c2 = c1.dup()
self.assertEqual(("XXX", "yyy"), c1.first())
# Not interested in warnings about implicit close.
import warnings
if sys.version_info < (2, 6) :
# Completely resetting the warning state is
# problematic with python >=2.6 with -3 (py3k warning),
# because some stdlib modules selectively ignores warnings.
warnings.simplefilter("ignore")
txn.commit()
warnings.resetwarnings()
else :
# When we drop support for python 2.4
# we could use: (in 2.5 we need a __future__ statement)
#
# with warnings.catch_warnings():
# warnings.simplefilter("ignore")
# txn.commit()
#
# We can not use "with" as is, because it would be invalid syntax
# in python 2.4 and (with no __future__) 2.5.
# Here we simulate "with" following PEP 343 :
w = warnings.catch_warnings()
w.__enter__()
try :
warnings.simplefilter("ignore")
txn.commit()
finally :
w.__exit__()
self.assertRaises(db.DBCursorClosedError, c2.first)
def test07_close_db_before_sequence(self):
import os.path
path=os.path.join(self.homeDir,self.filename)
d = db.DB()
d.open(path, db.DB_BTREE, db.DB_CREATE | db.DB_THREAD, 0666)
dbs=db.DBSequence(d)
d.close() # This "close" should close the child DBSequence also
dbs.close() # If not closed, core dump (in Berkeley DB 4.6.*)
#----------------------------------------------------------------------
def test_suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(DBEnvClosedEarlyCrash))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
| mit |
cgmckeever/contests | 2012-mebipenny/contest/reductio-ad-egyptium/solution.py | 7 | 1843 | import sys
import math
class Fraction:
def __init__(self, num, denom):
if (num > 0 and denom % num == 0):
# Reduce the fraction
denom /= num
num = 1
self.num = num
self.denom = denom
def subtract(self, other_num, other_denom):
common_denom = self.denom * other_denom
converted_num = self.num * common_denom / self.denom
converted_other_num = other_num * common_denom / other_denom
return Fraction(converted_num - converted_other_num, common_denom)
def largest_contained_egyptian(self):
if self.num == 0:
return Fraction(0, self.denom)
if self.num == 1:
return Fraction(1, self.denom)
next_denom = int(math.ceil((0.0 + self.denom) / self.num))
next_fraction = Fraction(1, next_denom)
return next_fraction
def __str__(self):
return "%d/%d" % (self.num, self.denom)
def main(num, denom):
goal = Fraction(num, denom)
curr_denom = goal.largest_contained_egyptian().denom
final_denoms = []
while goal.num != 0:
remainder = goal.subtract(1, curr_denom)
if remainder.num >= 0:
final_denoms.append(curr_denom)
goal = remainder
if False:
# simple version
curr_denom += 1;
else:
# advanced version: intelligently jump to the next available denominator
next_fraction = goal.largest_contained_egyptian()
curr_denom = next_fraction.denom
if goal.subtract(next_fraction.num, next_fraction.denom).num < 0:
print "*** rounding error ***"
final_denoms.append(0)
goal.num = 0
components = ["%d" % x for x in final_denoms]
print "%s" % ' '.join(components)
if __name__ == "__main__":
while True:
data = sys.stdin.readline()
if not data:
break
n, d = data.split(' ')
n = int(n)
d = int(d)
main(n, d)
| mit |
aptomar/apt-file-format | test/testAptofile.py | 1 | 23249 | ################################################################
# #
# testAptofile.py #
# Copyright (c) 2013 Aptomar AS, All Rights Reserved #
# #
# Author: Jarle Bauck Hamar: <[email protected]> #
# Date: 2013-05-23 #
# #
################################################################
import unittest
import sys
import json
sys.path.append('../src')
from aptofile import Aptofile
import jsonschema
class TestManifest(unittest.TestCase):
def setUp(self):
with open('tests/header.json') as fid:
self.inst = json.load(fid)
self.schema = Aptofile.SCHEMA
def validate(self):
try:
jsonschema.validate(self.inst, self.schema, Aptofile.VALIDATOR,
format_checker = jsonschema.FormatChecker())
except jsonschema.ValidationError:
return False
return True
def test_schema_validates(self):
Aptofile.VALIDATOR.check_schema(Aptofile.SCHEMA)
def test_valid_manifest_header(self):
self.assertTrue(self.validate())
def test_manifest_missing_date(self):
del self.inst["date"]
self.assertFalse(self.validate())
def test_manifest_missing_description(self):
del self.inst["description"]
self.assertFalse(self.validate())
def test_manifest_missing_version(self):
del self.inst["manifest_version"]
self.assertFalse(self.validate())
def test_manifest_missing_generator(self):
del self.inst["generator"]
self.assertFalse(self.validate())
def test_manifest_bad_date(self):
self.inst["date"] = "tomorrow"
self.assertFalse(self.validate())
def test_manifest_disallow_additional_properties(self):
self.inst["extra"] = "large"
self.assertFalse(self.validate())
class TestAsset(unittest.TestCase):
def testCreateAsset(self):
f = 'tests/asset.apt'
with Aptofile.create(f,'asset') as af:
af.setDescription("This is a description of the asset.")
af.setGenerator("aptfile.py", "Aptomar AS")
af.addLayer('layer1', name='layer1-name',
geometry_data=[('tests/asset/layers/layer1.dbf',
'file:/layers/layer1.dbf'),
('tests/asset/layers/layer1.shp',
'layers/layer1.shp'),
('tests/asset/layers/layer1.shx',
'layers/layer1.shx')])
af.addFile2Layer(('tests/asset/styles/layer1.xml',
'styles/layer1.xml'), 'layer1', 'style')
af.addFile2Layer(('tests/asset/resource1.png','resource1.png'),
'layer1', 'resources')
af.addFile2Layer(('tests/asset/resource2.png','resource2.png'),
'layer1', 'resources')
af.addLayer('layer2',name='layer2-name')
af.addFile2Layer('layers/layer1.shp', 'layer2',
'geometry', writeFile=False)
af.addFile2Layer('layers/layer1.dbf', 'layer2',
'geometry', writeFile=False)
af.addFile2Layer('layers/layer1.shx', 'layer2',
'geometry', writeFile=False)
af.addFile2Layer('layers/layer1.shp', 'layer2',
'geometry', writeFile=False)
af.addFile2Layer('styles/layer1.xml','layer2',
'style', writeFile=False)
af.addFile2Layer('resource1.png','layer2','resources', writeFile=False)
af.addFile2Layer('resource2.png','layer2','resources', writeFile=False)
af.addFile2Layer('http://very-big-file.com/','layer2','resources', writeFile=True)
af.addGroup('group1','group1-name',['layer1'])
af.addGroup('group2','group2-name',['layer2'])
#Validate before write:
self.assertTrue(af.validate())
#Validate after write and open
self.assertTrue(Aptofile.validateFile(f))
def testAssetMissingFile(self):
f = 'tests/asset_missing_file.apt'
with Aptofile.create(f,'asset') as af:
af.setDescription("This is a description of the asset.")
af.setGenerator("aptfile.py", "Aptomar AS")
af.addLayer('layer1', name='layer1-name',
geometry_data=[('tests/asset/layers/layer1.dbf',
'layers/layer1.dbf'),
('tests/asset/layers/layer1.shp',
'layers/layer1.shp'),
('tests/asset/layers/layer1.shx',
'layers/layer1.shx')])
af.addFile2Layer(('tests/asset/styles/layer1.xml',
'styles/layer1.xml'), 'layer1', 'style')
af.addFile2Layer(('tests/asset/resource1.png','resource1.png'),
'layer1', 'resources')
af.addFile2Layer(('tests/asset/resource2.png','resource2.png'),
'layer1', 'resources')
af.addLayer('layer2',name='layer2-name')
af.addFile2Layer('layers/layer1.shp', 'layer2',
'geometry', writeFile=False)
af.addFile2Layer('layers/layer1.dbf', 'layer2',
'geometry', writeFile=False)
af.addFile2Layer('layers/layer1.shx', 'layer2',
'geometry', writeFile=False)
af.addFile2Layer('layers/layer1.shp', 'layer2',
'geometry', writeFile=False)
af.addFile2Layer('styles/layer1.xml','layer2',
'style', writeFile=False)
af.addFile2Layer('resource1.png','layer2','resources', writeFile=False)
af.addFile2Layer('resource2.png','layer2','resources', writeFile=False)
af.addFile2Layer('http://very-big-file.com/','layer2','resources', writeFile=True)
af.addGroup('group1','group1-name',['layer1'])
af.addGroup('group2','group2-name',['layer2'])
#Validate before write:
self.assertTrue(af.validate())
af.addFile2Layer('resource3.png','layer2','resources', writeFile=False)
#Validate after write and open
self.assertFalse(Aptofile.validateFile(f))
def testAssetIncorrectLayerInGroup(self):
f = 'tests/asset_incorrect_layer_in_group.apt'
with Aptofile.create(f,'asset') as af:
af.setDescription("This is a description of the asset.")
af.setGenerator("aptfile.py", "Aptomar AS")
af.addLayer('layer1', name='layer1-name',
geometry_data=[('tests/asset/layers/layer1.dbf',
'layers/layer1.dbf'),
('tests/asset/layers/layer1.shp',
'layers/layer1.shp'),
('tests/asset/layers/layer1.shx',
'layers/layer1.shx')])
af.addFile2Layer(('tests/asset/styles/layer1.xml',
'styles/layer1.xml'), 'layer1', 'style')
af.addFile2Layer(('tests/asset/resource1.png','resource1.png'),
'layer1', 'resources')
af.addFile2Layer(('tests/asset/resource2.png','resource2.png'),
'layer1', 'resources')
af.addLayer('layer2',name='layer2-name')
af.addFile2Layer('layers/layer1.shp', 'layer2',
'geometry', writeFile=False)
af.addFile2Layer('layers/layer1.dbf', 'layer2',
'geometry', writeFile=False)
af.addFile2Layer('layers/layer1.shx', 'layer2',
'geometry', writeFile=False)
af.addFile2Layer('layers/layer1.shp', 'layer2',
'geometry', writeFile=False)
af.addFile2Layer('styles/layer1.xml','layer2',
'style', writeFile=False)
af.addFile2Layer('resource1.png','layer2','resources', writeFile=False)
af.addFile2Layer('resource2.png','layer2','resources', writeFile=False)
af.addFile2Layer('http://very-big-file.com/','layer2','resources', writeFile=True)
af.addGroup('group1','group1-name',['layer1'])
af.addGroup('group2','group2-name',['layer3'])
#Validate before write:
self.assertFalse(af.validate())
#Validate after write and open
self.assertFalse(Aptofile.validateFile(f))
def testAssetMissingStyle(self):
f = 'tests/asset_missing_style.apt'
with Aptofile.create(f,'asset') as af:
af.setDescription("This is a description of the asset.")
af.setGenerator("aptfile.py", "Aptomar AS")
af.addLayer('layer1', name='layer1-name',
geometry_data=[('tests/asset/layers/layer1.dbf',
'layers/layer1.dbf'),
('tests/asset/layers/layer1.shp',
'layers/layer1.shp'),
('tests/asset/layers/layer1.shx',
'layers/layer1.shx')])
af.addFile2Layer(('tests/asset/styles/layer1.xml',
'styles/layer1.xml'), 'layer1', 'style')
af.addFile2Layer(('tests/asset/resource1.png','resource1.png'),
'layer1', 'resources')
af.addFile2Layer(('tests/asset/resource2.png','resource2.png'),
'layer1', 'resources')
af.addLayer('layer2',name='layer2-name')
af.addFile2Layer('layers/layer1.shp', 'layer2',
'geometry', writeFile=False)
af.addFile2Layer('layers/layer1.dbf', 'layer2',
'geometry', writeFile=False)
af.addFile2Layer('layers/layer1.shx', 'layer2',
'geometry', writeFile=False)
af.addFile2Layer('layers/layer1.shp', 'layer2',
'geometry', writeFile=False)
af.addFile2Layer('styles/layer1.xml','layer2',
'style', writeFile=False)
af.addFile2Layer('resource1.png','layer2','resources', writeFile=False)
af.addFile2Layer('resource2.png','layer2','resources', writeFile=False)
af.addFile2Layer('http://very-big-file.com/','layer2','resources', writeFile=True)
af.addGroup('group1','group1-name',['layer1'])
af.addGroup('group2','group2-name',['layer2'])
#Validate before write:
self.assertTrue(af.validate())
del af.manifest['asset']['layers']['layer1']['style']
#Validate after write and open
self.assertFalse(Aptofile.validateFile(f))
def testAssetIncorrectDataType(self):
f = 'tests/asset_incorrect_data_type.apt'
with Aptofile.create(f,'asset') as af:
af.setDescription("This is a description of the asset.")
af.setGenerator("aptfile.py", "Aptomar AS")
af.addLayer('layer1', name='layer1-name',
geometry_data=[('tests/asset/layers/layer1.dbf',
'layers/layer1.dbf'),
('tests/asset/layers/layer1.shp',
'layers/layer1.shp'),
('tests/asset/layers/layer1.shx',
'layers/layer1.shx')])
af.addFile2Layer(('tests/asset/styles/layer1.xml',
'styles/layer1.xml'), 'layer1', 'style')
af.addFile2Layer(('tests/asset/resource1.png','resource1.png'),
'layer1', 'resources')
af.addFile2Layer(('tests/asset/resource2.png','resource2.png'),
'layer1', 'resources')
af.addLayer('layer2',name='layer2-name')
af.addFile2Layer('layers/layer1.shp', 'layer2',
'geometry', writeFile=False)
af.addFile2Layer('layers/layer1.dbf', 'layer2',
'geometry', writeFile=False)
af.addFile2Layer('layers/layer1.shx', 'layer2',
'geometry', writeFile=False)
af.addFile2Layer('layers/layer1.shp', 'layer2',
'geometry', writeFile=False)
af.addFile2Layer('styles/layer1.xml','layer2',
'style', writeFile=False)
af.addFile2Layer('resource1.png','layer2','resources', writeFile=False)
af.addFile2Layer('resource2.png','layer2','resources', writeFile=False)
af.addFile2Layer('http://very-big-file.com/','layer2','resources', writeFile=True)
af.addGroup('group1','group1-name',['layer1'])
af.addGroup('group2','group2-name',['layer2'])
#Validate before write:
self.assertTrue(af.validate())
d=af.manifest['asset']['layers']['layer1']['style']['data'].pop()
af.manifest['asset']['layers']['layer1']['style']['data'] = d
#Validate after write and open
self.assertFalse(Aptofile.validateFile(f))
class TestImage(unittest.TestCase):
def testImage(self):
f = 'tests/image.apt'
with Aptofile.create(f,'image') as af:
af.setGenerator(program='aptfile.py',creator='Aptomar AS')
af.setDescription('This is a description of the image')
af.setImageName('The image name')
af.setImageDescription('An image of something')
af.setImageGeoreference( 10.4344, 63.4181, 150.60)
af.setImageBounds(['data:,bounds as a string'])
af.addImageFile(('tests/image/image.jpg','image.jpg'))
self.assertTrue(af.validate())
self.assertTrue(Aptofile.validateFile(f))
def testImageMissingDate(self):
f = 'tests/image_missing_date.apt'
with Aptofile.create(f,'image') as af:
af.setGenerator(program='aptfile.py',creator='Aptomar AS')
af.setDescription('This is a description of the image')
af.setImageName('The image name')
af.setImageDescription('An image of something')
af.setImageGeoreference( 10.4344, 63.4181, 150.60)
af.setImageBounds(['data:,bounds as a string'])
af.addImageFile(('tests/image/image.jpg','image.jpg'))
self.assertTrue(af.validate())
del af.manifest['image']['created']
self.assertFalse(Aptofile.validateFile(f))
def testImageIncorrectDate(self):
        f = 'tests/image_incorrect_date.apt'
with Aptofile.create(f,'image') as af:
af.setGenerator(program='aptfile.py',creator='Aptomar AS')
af.setDescription('This is a description of the image')
af.setImageName('The image name')
af.setImageDescription('An image of something')
af.setImageGeoreference( 10.4344, 63.4181, 150.60)
af.setImageBounds(['data:,bounds as a string'])
af.addImageFile(('tests/image/image.jpg','image.jpg'))
self.assertTrue(af.validate())
af.manifest['image']['created'] = '23.05.13'
af.validate()
self.assertFalse(Aptofile.validateFile(f))
def testImageMissingFileAndGenerator(self):
f = 'tests/image_missing_file_and_generator.apt'
with Aptofile.create(f,'image') as af:
af.setGenerator(program='aptfile.py',creator='Aptomar AS')
af.setDescription('This is a description of the image')
af.setImageName('The image name')
af.setImageDescription('An image of something')
af.setImageGeoreference( 10.4344, 63.4181, 150.60)
af.setImageBounds(['data:,bounds as a string'])
af.manifest['image']['data']=['image.jpg']
del af.manifest['generator']
self.assertFalse(af.validate())
self.assertFalse(Aptofile.validateFile(f))
def testImageMissingGenerator(self):
f = 'tests/image_missing_generator.apt'
with Aptofile.create(f,'image') as af:
af.setGenerator(program='aptfile.py',creator='Aptomar AS')
af.setDescription('This is a description of the image')
af.setImageName('The image name')
af.setImageDescription('An image of something')
af.setImageGeoreference( 10.4344, 63.4181, 150.60)
af.setImageBounds(['data:,bounds as a string'])
af.addImageFile(('tests/image/image.jpg','image.jpg'))
self.assertTrue(af.validate())
del af.manifest['generator']
self.assertFalse(Aptofile.validateFile(f))
class testVideo(unittest.TestCase):
def testVideo(self):
f = 'tests/video.apt'
with Aptofile.create(f,'video') as af:
af.setGenerator(program='aptfile.py',creator='Aptomar AS')
af.setDescription('This is a description of the video')
af.setVideoName('The video name')
af.setVideoDescription('A video of something')
af.setVideoGeoreference( 10.4344, 63.4181, 150.60)
af.addVideoFile(('tests/video/video.avi','video.avi'))
self.assertTrue(af.validate())
self.assertTrue(Aptofile.validateFile(f))
def testVideoMissingFile(self):
f = 'tests/video_missing_file.apt'
with Aptofile.create(f,'video') as af:
af.setGenerator(program='aptfile.py',creator='Aptomar AS')
af.setDescription('This is a description of the video')
af.setVideoName('The video name')
af.setVideoDescription('A video of something')
af.setVideoGeoreference( 10.4344, 63.4181, 150.60)
self.assertFalse(af.validate())
self.assertFalse(Aptofile.validateFile(f))
def testVideoFileNotFound(self):
f = 'tests/video_file_not_found.apt'
with Aptofile.create(f,'video') as af:
af.setGenerator(program='aptfile.py',creator='Aptomar AS')
af.setDescription('This is a description of the video')
af.setVideoName('The video name')
af.setVideoDescription('A video of something')
af.setVideoGeoreference( 10.4344, 63.4181, 150.60)
af.manifest['video']['data']=['video.avi']
self.assertFalse(af.validate())
self.assertFalse(Aptofile.validateFile(f))
def testVideoMissingName(self):
f = 'tests/video_missing_name.apt'
with Aptofile.create(f,'video') as af:
af.setGenerator(program='aptfile.py',creator='Aptomar AS')
af.setDescription('This is a description of the video')
af.setVideoName('The video name')
af.setVideoDescription('A video of something')
af.setVideoGeoreference( 10.4344, 63.4181, 150.60)
af.addVideoFile(('tests/video/video.avi','video.avi'))
self.assertTrue(af.validate())
del af.manifest['video']['name']
self.assertFalse(Aptofile.validateFile(f))
class TestPoint(unittest.TestCase):
def testPoint(self):
f = 'tests/point.apt'
with Aptofile.create(f,'point') as af:
af.setGenerator('aptfile.py','Aptomar AS')
af.setDescription('This is a description of the point.')
af.setPointName('The Point')
af.setPointDescription('This is a description of a point.')
af.setPointType('boat')
af.setPointGeometry('data:data_describing_the_point')
self.assertTrue(af.validate())
self.assertTrue(Aptofile.validateFile(f))
def testPointInvalidType(self):
f = 'tests/point_invalid_type.apt'
with Aptofile.create(f,'point') as af:
af.setGenerator('aptfile.py','Aptomar AS')
af.setDescription('This is a description of the point.')
af.setPointName('The Point')
af.setPointDescription('This is a description of a point.')
af.setPointType('boat')
af.setPointGeometry('data:data_describing_the_point')
self.assertTrue(af.validate())
af.manifest['point']['object-type'] = 'UFO'
self.assertFalse(Aptofile.validateFile(f))
def testRoute(self):
f = 'tests/route.apt'
with Aptofile.create(f,'route') as af:
af.setGenerator('aptfile.py','Aptomar AS')
af.setDescription('This is a description of the route.')
af.setRouteName('The Route')
af.setRouteDescription('This is a description of the route.')
af.setRouteGeometry('data:data_describing_the_route')
self.assertTrue(af.validate())
self.assertTrue(Aptofile.validateFile(f))
def testRouteMissingGeometry(self):
f = 'tests/route.apt'
with Aptofile.create(f,'route') as af:
af.setGenerator('aptfile.py','Aptomar AS')
af.setDescription('This is a description of the route.')
af.setRouteName('The Route')
af.setRouteDescription('This is a description of the route.')
af.setRouteGeometry('data:data_describing_the_route')
self.assertTrue(af.validate())
del af.manifest['route']['geometry']
self.assertFalse(Aptofile.validateFile(f))
class TestArea(unittest.TestCase):
def testArea(self):
f = 'tests/area.apt'
with Aptofile.create(f,'area') as af:
af.setGenerator('aptfile.py','Aptomar AS')
af.setDescription('This is a description of the area.')
            af.setAreaName('The Area')
af.setAreaDescription('This is a description of the area.')
af.setAreaGeometry('data:data_describing_the_area')
self.assertTrue(af.validate())
self.assertTrue(Aptofile.validateFile(f))
def testAreaMissingAreaDescription(self):
f = 'tests/area_missing_area_desc.apt'
with Aptofile.create(f,'area') as af:
af.setGenerator('aptfile.py','Aptomar AS')
af.setDescription('This is a description of the area.')
            af.setAreaName('The Area')
            af.setAreaDescription('This is a description of the area.')
af.setAreaGeometry('data:data_describing_the_area')
self.assertTrue(af.validate())
del af.manifest['area']['description']
self.assertFalse(Aptofile.validateFile(f))
if __name__=='__main__':
unittest.main()
| bsd-3-clause |
uclouvain/osis | base/migrations/0062_add_uuid_field.py | 2 | 2432 | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-09-05 12:48
from __future__ import unicode_literals
import uuid
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('base', '0061_auto_20160902_1639'),
]
operations = [
migrations.RemoveField(
model_name='domainoffer',
name='domain',
),
migrations.RemoveField(
model_name='domainoffer',
name='offer_year',
),
migrations.AddField(
model_name='academicyear',
name='uuid',
field=models.UUIDField(db_index=True, default=uuid.uuid4, null=True),
),
migrations.AddField(
model_name='campus',
name='uuid',
field=models.UUIDField(db_index=True, default=uuid.uuid4, null=True),
),
migrations.AddField(
model_name='offer',
name='uuid',
field=models.UUIDField(db_index=True, default=uuid.uuid4, null=True),
),
migrations.AddField(
model_name='offerenrollment',
name='uuid',
field=models.UUIDField(db_index=True, default=uuid.uuid4, null=True),
),
migrations.AddField(
model_name='offeryear',
name='uuid',
field=models.UUIDField(db_index=True, default=uuid.uuid4, null=True),
),
migrations.AddField(
model_name='offeryeardomain',
name='uuid',
field=models.UUIDField(db_index=True, default=uuid.uuid4, null=True),
),
migrations.AddField(
model_name='organization',
name='uuid',
field=models.UUIDField(db_index=True, default=uuid.uuid4, null=True),
),
migrations.AddField(
model_name='person',
name='uuid',
field=models.UUIDField(db_index=True, default=uuid.uuid4, null=True),
),
migrations.AddField(
model_name='student',
name='uuid',
field=models.UUIDField(db_index=True, default=uuid.uuid4, null=True),
),
migrations.AddField(
model_name='tutor',
name='uuid',
field=models.UUIDField(db_index=True, default=uuid.uuid4, null=True),
),
migrations.DeleteModel(
name='DomainOffer',
),
]
| agpl-3.0 |
ojengwa/grr | lib/rdfvalues/checks.py | 2 | 12558 | #!/usr/bin/env python
"""Implementation of check types."""
from grr.lib import config_lib
from grr.lib import rdfvalue
from grr.lib.checks import checks
from grr.lib.checks import filters
from grr.lib.checks import hints
from grr.lib.checks import triggers
from grr.lib.rdfvalues import structs
from grr.proto import checks_pb2
def ValidateMultiple(component, hint):
errors = []
for item in component:
try:
item.Validate()
except (checks.DefinitionError) as e:
errors.append(str(e))
if errors:
raise checks.DefinitionError("%s:\n %s" % (hint, "\n ".join(errors)))
def MatchStrToList(match=None):
# Set a default match type of ANY, if unset.
# Allow multiple match types, either as a list or as a string.
if match is None:
match = ["ANY"]
elif isinstance(match, basestring):
match = match.split()
return match
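# Illustrative behaviour of MatchStrToList (comments only, not executed here):
# MatchStrToList() -> ["ANY"] (the default), MatchStrToList("ALL") -> ["ALL"],
# and a list such as ["ANY"] is passed through unchanged.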
class CheckResult(structs.RDFProtoStruct):
"""Results of a single check performed on a host."""
protobuf = checks_pb2.CheckResult
def __nonzero__(self):
return bool(self.anomaly)
def ExtendAnomalies(self, other):
"""Merge anomalies from another CheckResult."""
for o in other:
if o is not None:
self.anomaly.Extend(list(o.anomaly))
class CheckResults(structs.RDFProtoStruct):
"""All results for a single host."""
protobuf = checks_pb2.CheckResults
def __nonzero__(self):
return bool(self.result)
class Target(structs.RDFProtoStruct):
"""Definitions of hosts to target."""
protobuf = checks_pb2.Target
def __init__(self, initializer=None, age=None, **kwargs):
if isinstance(initializer, dict):
conf = initializer
initializer = None
else:
conf = kwargs
super(Target, self).__init__(initializer=initializer, age=age, **conf)
def __nonzero__(self):
return any([self.cpe, self.os, self.label])
def Validate(self):
if self.cpe:
# TODO(user): Add CPE library to GRR.
pass
if self.os:
pass
if self.label:
pass
class Check(structs.RDFProtoStruct):
"""A definition of a problem, and ways to detect it.
Checks contain an identifier of a problem (check_id) that is a reference to an
externally or internally defined vulnerability.
Checks use one or more Methods to determine if an issue exists. Methods define
data collection and processing, and return an Anomaly if the conditions tested
by the method weren't met.
Checks can define a default platform, OS or environment to target. This
is passed to each Method, but can be overridden by more specific definitions.
"""
protobuf = checks_pb2.Check
def __init__(self, initializer=None, age=None, check_id=None, target=None,
match=None, method=None, hint=None):
super(Check, self).__init__(initializer=initializer, age=age)
self.check_id = check_id
self.match = MatchStrToList(match)
self.hint = Hint(hint, reformat=False)
self.target = target
if method is None:
method = []
self.triggers = triggers.Triggers()
self.matcher = checks.Matcher(self.match, self.hint)
for cfg in method:
# Use the value of "target" as a default for each method, if defined.
# Targets defined in methods or probes override this default value.
if hint:
cfg["hint"] = hints.Overlay(child=cfg.get("hint", {}), parent=hint)
if target:
cfg.setdefault("target", target)
# Create the method and add its triggers to the check.
m = Method(**cfg)
self.method.append(m)
self.triggers.Update(m.triggers, callback=m)
self.artifacts = set([t.artifact for t in self.triggers.conditions])
def SelectChecks(self, conditions):
"""Identifies which check methods to use based on host attributes.
Queries the trigger map for any check methods that apply to a combination of
OS, CPE and/or label.
Args:
conditions: A list of Condition objects.
Returns:
A list of method callbacks that should perform checks.
"""
return self.triggers.Calls(conditions)
def UsesArtifact(self, artifacts):
"""Determines if the check uses the specified artifact.
Args:
artifacts: Either a single artifact name, or a list of artifact names
Returns:
True if the check uses a specific artifact.
"""
# If artifact is a single string, see if it is in the list of artifacts
# as-is. Otherwise, test whether any of the artifacts passed in to this
# function exist in the list of artifacts.
if isinstance(artifacts, basestring):
return artifacts in self.artifacts
else:
return any(True for artifact in artifacts if artifact in self.artifacts)
def Parse(self, conditions, host_data):
"""Runs methods that evaluate whether collected host_data has an issue.
Args:
conditions: A list of conditions to determine which Methods to trigger.
host_data: A map of artifacts and rdf data.
Returns:
A CheckResult populated with Anomalies if an issue exists.
"""
result = CheckResult(check_id=self.check_id)
methods = self.SelectChecks(conditions)
result.ExtendAnomalies([m.Parse(conditions, host_data) for m in methods])
return result
def Validate(self):
"""Check the method is well constructed."""
if not self.check_id:
raise checks.DefinitionError("Check has missing check_id value")
cls_name = self.check_id
if not self.method:
raise checks.DefinitionError("Check %s has no methods" % cls_name)
ValidateMultiple(self.method,
"Check %s has invalid method definitions" % cls_name)
class Method(structs.RDFProtoStruct):
"""A specific test method using 0 or more filters to process data."""
protobuf = checks_pb2.Method
def __init__(self, initializer=None, age=None, **kwargs):
if isinstance(initializer, dict):
conf = initializer
initializer = None
else:
conf = kwargs
super(Method, self).__init__(initializer=initializer, age=age)
probe = conf.get("probe", {})
resource = conf.get("resource", {})
hint = conf.get("hint", {})
target = conf.get("target", {})
if hint:
# Add the hint to children.
for cfg in probe:
cfg["hint"] = hints.Overlay(child=cfg.get("hint", {}), parent=hint)
self.probe = [Probe(**cfg) for cfg in probe]
self.hint = Hint(hint, reformat=False)
self.match = MatchStrToList(kwargs.get("match"))
self.matcher = checks.Matcher(self.match, self.hint)
self.resource = [rdfvalue.Dict(**r) for r in resource]
self.target = Target(**target)
self.triggers = triggers.Triggers()
for p in self.probe:
# If the probe has a target, use it. Otherwise, use the method's target.
target = p.target or self.target
self.triggers.Add(p.artifact, target, p)
def Parse(self, conditions, host_data):
"""Runs probes that evaluate whether collected data has an issue.
Args:
conditions: The trigger conditions.
host_data: A map of artifacts and rdf data.
Returns:
Anomalies if an issue exists.
"""
processed = []
probes = self.triggers.Calls(conditions)
for p in probes:
      # TODO(user): Need to use the (artifact, rdf_data) tuple.
# Get the data required for the probe.
rdf_data = host_data.get(p.artifact)
result = p.Parse(rdf_data)
if result:
processed.append(result)
# Matcher compares the number of probes that triggered with results.
return self.matcher.Detect(probes, processed)
def Validate(self):
"""Check the Method is well constructed."""
ValidateMultiple(self.probe, "Method has invalid probes")
ValidateMultiple(self.target, "Method has invalid target")
ValidateMultiple(self.hint, "Method has invalid hint")
class Probe(structs.RDFProtoStruct):
"""The suite of filters applied to host data."""
protobuf = checks_pb2.Probe
def __init__(self, initializer=None, age=None, **kwargs):
if isinstance(initializer, dict):
conf = initializer
initializer = None
else:
conf = kwargs
conf["match"] = MatchStrToList(kwargs.get("match"))
super(Probe, self).__init__(initializer=initializer, age=age, **conf)
if self.filters:
handler = filters.GetHandler(mode=self.mode)
else:
handler = filters.GetHandler()
self.baseliner = handler(artifact=self.artifact, filters=self.baseline)
self.handler = handler(artifact=self.artifact, filters=self.filters)
hinter = Hint(conf.get("hint", {}), reformat=False)
self.matcher = checks.Matcher(conf["match"], hinter)
def Parse(self, rdf_data):
"""Process rdf data through filters. Test if results match expectations.
Processing of rdf data is staged by a filter handler, which manages the
processing of host data. The output of the filters are compared against
expected results.
Args:
rdf_data: An iterable containing 0 or more rdf values.
Returns:
An anomaly if data didn't match expectations.
"""
# TODO(user): Make sure that the filters are called on collected data.
if self.baseline:
comparison = self.baseliner.Parse(rdf_data)
else:
comparison = rdf_data
found = self.handler.Parse(comparison)
results = self.hint.Render(found)
return self.matcher.Detect(comparison, results)
def Validate(self):
"""Check the test set is well constructed."""
ValidateMultiple(self.target, "Probe has invalid target")
self.baseliner.Validate()
self.handler.Validate()
self.hint.Validate()
class Filter(structs.RDFProtoStruct):
"""Generic filter to provide an interface for different types of filter."""
protobuf = checks_pb2.Filter
def __init__(self, initializer=None, age=None, **kwargs):
# FIXME(sebastianw): Probe seems to pass in the configuration for filters
# as a dict in initializer, rather than as kwargs.
if isinstance(initializer, dict):
conf = initializer
initializer = None
else:
conf = kwargs
super(Filter, self).__init__(initializer=initializer, age=age, **conf)
filter_name = self.type or "Filter"
self._filter = filters.Filter.GetFilter(filter_name)
def Parse(self, rdf_data):
"""Process rdf data through the filter.
Filters sift data according to filter rules. Data that passes the filter
    rule is kept; other data is dropped.
If no filter method is provided, the data is returned as a list.
Otherwise, a list of parsed data items are returned.
Args:
rdf_data: Host data that has already been processed by a Parser into RDF.
Returns:
A list of data items that matched the filter rules.
"""
if not self._filter:
if isinstance(rdf_data, basestring):
return [rdf_data]
return list(rdf_data)
# TODO(user): filters need to return data as a list if no expression
# is provided.
return [x for x in self._filter.Parse(rdf_data, self.expression)]
def Validate(self):
"""The filter exists, and has valid filter and hint expressions."""
if self.type not in filters.Filter.classes:
raise checks.DefinitionError("Undefined filter type %s" % self.type)
self._filter.Validate(self.expression)
ValidateMultiple(self.hint, "Filter has invalid hint")
class Hint(structs.RDFProtoStruct):
"""Human-formatted descriptions of problems, fixes and findings."""
protobuf = checks_pb2.Hint
def __init__(self, initializer=None, age=None, reformat=True, **kwargs):
if isinstance(initializer, dict):
conf = initializer
initializer = None
else:
conf = kwargs
super(Hint, self).__init__(initializer=initializer, age=age, **conf)
if not self.max_results:
self.max_results = config_lib.CONFIG.Get("Checks.max_results")
if reformat:
self.hinter = hints.Hinter(self.format)
else:
self.hinter = hints.Hinter()
def Render(self, rdf_data):
"""Processes data according to formatting rules."""
report_data = rdf_data[:self.max_results]
results = [self.hinter.Render(rdf) for rdf in report_data]
extra = len(rdf_data) - len(report_data)
if extra > 0:
results.append("...plus another %d issues." % extra)
return results
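  # Illustrative example: with max_results = 2 and five findings, Render()
  # returns the two formatted findings followed by the string
  # "...plus another 3 issues."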
def Explanation(self, state):
"""Creates an anomaly explanation string."""
if self.problem:
return "%s: %s" % (state, self.problem)
def Validate(self):
"""Ensures that required values are set and formatting rules compile."""
# TODO(user): Default format string.
if self.problem:
pass
| apache-2.0 |
CloudBoltSoftware/cloudbolt-forge | ui_extensions/veeam_admin_extension/restore_backup.py | 1 | 1717 | import requests
import time
from xml.dom import minidom
from common.methods import set_progress
from xui.veeam.veeam_admin import VeeamManager
def run(server, *args, **kwargs):
set_progress(f"Starting Veeam Backup restoration... ")
veeam = VeeamManager()
server_ci = veeam.get_connection_info()
url = f'http://{server_ci.ip}:9399/api/vmRestorePoints/' + \
kwargs.get('restore_point_href') + '?action=restore'
session_id = veeam.get_veeam_server_session_id()
header = {"X-RestSvcSessionId": session_id}
response = requests.post(url=url, headers=header)
task = minidom.parseString(response.content.decode('utf-8'))
items = task.getElementsByTagName('Task')[0].attributes.items()
restoration_url = [item for item in items if item[0] == 'Href'][0][-1]
def check_state():
response = requests.get(restoration_url, headers=header)
dom = minidom.parseString(response.content.decode('utf-8'))
state = dom.getElementsByTagName('State')[0]
child = state.firstChild
return child
    # Wait until the restoration has completed.
while check_state().data == 'Running':
# wait
set_progress("Waiting for restoration to complete...")
time.sleep(10)
if check_state().data == 'Finished':
set_progress("Server restoration completed successfully")
return "SUCCESS", "Server restoration completed successfully", ""
else:
set_progress("Server restoration didn't complete successfully")
return "FAILURE", "", "Server restoration didn't complete successfully"
| apache-2.0 |
sunlianqiang/kbengine | kbe/src/lib/python/Lib/test/test_pprint.py | 72 | 30339 | # -*- coding: utf-8 -*-
import pprint
import test.support
import unittest
import test.test_set
import random
import collections
import itertools
# list, tuple and dict subclasses that do or don't overwrite __repr__
class list2(list):
pass
class list3(list):
def __repr__(self):
return list.__repr__(self)
class tuple2(tuple):
pass
class tuple3(tuple):
def __repr__(self):
return tuple.__repr__(self)
class set2(set):
pass
class set3(set):
def __repr__(self):
return set.__repr__(self)
class frozenset2(frozenset):
pass
class frozenset3(frozenset):
def __repr__(self):
return frozenset.__repr__(self)
class dict2(dict):
pass
class dict3(dict):
def __repr__(self):
return dict.__repr__(self)
class Unorderable:
def __repr__(self):
return str(id(self))
class QueryTestCase(unittest.TestCase):
def setUp(self):
self.a = list(range(100))
self.b = list(range(200))
self.a[-12] = self.b
def test_basic(self):
# Verify .isrecursive() and .isreadable() w/o recursion
pp = pprint.PrettyPrinter()
for safe in (2, 2.0, 2j, "abc", [3], (2,2), {3: 3}, "yaddayadda",
self.a, self.b):
# module-level convenience functions
self.assertFalse(pprint.isrecursive(safe),
"expected not isrecursive for %r" % (safe,))
self.assertTrue(pprint.isreadable(safe),
"expected isreadable for %r" % (safe,))
# PrettyPrinter methods
self.assertFalse(pp.isrecursive(safe),
"expected not isrecursive for %r" % (safe,))
self.assertTrue(pp.isreadable(safe),
"expected isreadable for %r" % (safe,))
def test_knotted(self):
# Verify .isrecursive() and .isreadable() w/ recursion
# Tie a knot.
self.b[67] = self.a
# Messy dict.
self.d = {}
self.d[0] = self.d[1] = self.d[2] = self.d
pp = pprint.PrettyPrinter()
for icky in self.a, self.b, self.d, (self.d, self.d):
self.assertTrue(pprint.isrecursive(icky), "expected isrecursive")
self.assertFalse(pprint.isreadable(icky), "expected not isreadable")
self.assertTrue(pp.isrecursive(icky), "expected isrecursive")
self.assertFalse(pp.isreadable(icky), "expected not isreadable")
# Break the cycles.
self.d.clear()
del self.a[:]
del self.b[:]
for safe in self.a, self.b, self.d, (self.d, self.d):
# module-level convenience functions
self.assertFalse(pprint.isrecursive(safe),
"expected not isrecursive for %r" % (safe,))
self.assertTrue(pprint.isreadable(safe),
"expected isreadable for %r" % (safe,))
# PrettyPrinter methods
self.assertFalse(pp.isrecursive(safe),
"expected not isrecursive for %r" % (safe,))
self.assertTrue(pp.isreadable(safe),
"expected isreadable for %r" % (safe,))
def test_unreadable(self):
# Not recursive but not readable anyway
pp = pprint.PrettyPrinter()
for unreadable in type(3), pprint, pprint.isrecursive:
# module-level convenience functions
self.assertFalse(pprint.isrecursive(unreadable),
"expected not isrecursive for %r" % (unreadable,))
self.assertFalse(pprint.isreadable(unreadable),
"expected not isreadable for %r" % (unreadable,))
# PrettyPrinter methods
self.assertFalse(pp.isrecursive(unreadable),
"expected not isrecursive for %r" % (unreadable,))
self.assertFalse(pp.isreadable(unreadable),
"expected not isreadable for %r" % (unreadable,))
def test_same_as_repr(self):
# Simple objects, small containers and classes that overwrite __repr__
# For those the result should be the same as repr().
# Ahem. The docs don't say anything about that -- this appears to
# be testing an implementation quirk. Starting in Python 2.5, it's
# not true for dicts: pprint always sorts dicts by key now; before,
# it sorted a dict display if and only if the display required
# multiple lines. For that reason, dicts with more than one element
# aren't tested here.
for simple in (0, 0, 0+0j, 0.0, "", b"",
(), tuple2(), tuple3(),
[], list2(), list3(),
set(), set2(), set3(),
frozenset(), frozenset2(), frozenset3(),
{}, dict2(), dict3(),
self.assertTrue, pprint,
-6, -6, -6-6j, -1.5, "x", b"x", (3,), [3], {3: 6},
(1,2), [3,4], {5: 6},
tuple2((1,2)), tuple3((1,2)), tuple3(range(100)),
[3,4], list2([3,4]), list3([3,4]), list3(range(100)),
set({7}), set2({7}), set3({7}),
frozenset({8}), frozenset2({8}), frozenset3({8}),
dict2({5: 6}), dict3({5: 6}),
range(10, -11, -1)
):
native = repr(simple)
self.assertEqual(pprint.pformat(simple), native)
self.assertEqual(pprint.pformat(simple, width=1, indent=0)
.replace('\n', ' '), native)
self.assertEqual(pprint.saferepr(simple), native)
def test_basic_line_wrap(self):
# verify basic line-wrapping operation
o = {'RPM_cal': 0,
'RPM_cal2': 48059,
'Speed_cal': 0,
'controldesk_runtime_us': 0,
'main_code_runtime_us': 0,
'read_io_runtime_us': 0,
'write_io_runtime_us': 43690}
exp = """\
{'RPM_cal': 0,
'RPM_cal2': 48059,
'Speed_cal': 0,
'controldesk_runtime_us': 0,
'main_code_runtime_us': 0,
'read_io_runtime_us': 0,
'write_io_runtime_us': 43690}"""
for type in [dict, dict2]:
self.assertEqual(pprint.pformat(type(o)), exp)
o = range(100)
exp = '[%s]' % ',\n '.join(map(str, o))
for type in [list, list2]:
self.assertEqual(pprint.pformat(type(o)), exp)
o = tuple(range(100))
exp = '(%s)' % ',\n '.join(map(str, o))
for type in [tuple, tuple2]:
self.assertEqual(pprint.pformat(type(o)), exp)
# indent parameter
o = range(100)
exp = '[ %s]' % ',\n '.join(map(str, o))
for type in [list, list2]:
self.assertEqual(pprint.pformat(type(o), indent=4), exp)
def test_nested_indentations(self):
o1 = list(range(10))
o2 = dict(first=1, second=2, third=3)
o = [o1, o2]
expected = """\
[ [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
{ 'first': 1,
'second': 2,
'third': 3}]"""
self.assertEqual(pprint.pformat(o, indent=4, width=42), expected)
def test_sorted_dict(self):
# Starting in Python 2.5, pprint sorts dict displays by key regardless
# of how small the dictionary may be.
# Before the change, on 32-bit Windows pformat() gave order
# 'a', 'c', 'b' here, so this test failed.
d = {'a': 1, 'b': 1, 'c': 1}
self.assertEqual(pprint.pformat(d), "{'a': 1, 'b': 1, 'c': 1}")
self.assertEqual(pprint.pformat([d, d]),
"[{'a': 1, 'b': 1, 'c': 1}, {'a': 1, 'b': 1, 'c': 1}]")
# The next one is kind of goofy. The sorted order depends on the
# alphabetic order of type names: "int" < "str" < "tuple". Before
# Python 2.5, this was in the test_same_as_repr() test. It's worth
# keeping around for now because it's one of few tests of pprint
# against a crazy mix of types.
self.assertEqual(pprint.pformat({"xy\tab\n": (3,), 5: [[]], (): {}}),
r"{5: [[]], 'xy\tab\n': (3,), (): {}}")
def test_ordered_dict(self):
words = 'the quick brown fox jumped over a lazy dog'.split()
d = collections.OrderedDict(zip(words, itertools.count()))
self.assertEqual(pprint.pformat(d),
"""\
{'the': 0,
'quick': 1,
'brown': 2,
'fox': 3,
'jumped': 4,
'over': 5,
'a': 6,
'lazy': 7,
'dog': 8}""")
def test_subclassing(self):
o = {'names with spaces': 'should be presented using repr()',
'others.should.not.be': 'like.this'}
exp = """\
{'names with spaces': 'should be presented using repr()',
others.should.not.be: like.this}"""
self.assertEqual(DottedPrettyPrinter().pformat(o), exp)
def test_set_reprs(self):
self.assertEqual(pprint.pformat(set()), 'set()')
self.assertEqual(pprint.pformat(set(range(3))), '{0, 1, 2}')
self.assertEqual(pprint.pformat(set(range(7)), width=20), '''\
{0,
1,
2,
3,
4,
5,
6}''')
self.assertEqual(pprint.pformat(set2(range(7)), width=20), '''\
set2({0,
1,
2,
3,
4,
5,
6})''')
self.assertEqual(pprint.pformat(set3(range(7)), width=20),
'set3({0, 1, 2, 3, 4, 5, 6})')
self.assertEqual(pprint.pformat(frozenset()), 'frozenset()')
self.assertEqual(pprint.pformat(frozenset(range(3))),
'frozenset({0, 1, 2})')
self.assertEqual(pprint.pformat(frozenset(range(7)), width=20), '''\
frozenset({0,
1,
2,
3,
4,
5,
6})''')
self.assertEqual(pprint.pformat(frozenset2(range(7)), width=20), '''\
frozenset2({0,
1,
2,
3,
4,
5,
6})''')
self.assertEqual(pprint.pformat(frozenset3(range(7)), width=20),
'frozenset3({0, 1, 2, 3, 4, 5, 6})')
@unittest.expectedFailure
#See http://bugs.python.org/issue13907
@test.support.cpython_only
def test_set_of_sets_reprs(self):
# This test creates a complex arrangement of frozensets and
# compares the pretty-printed repr against a string hard-coded in
# the test. The hard-coded repr depends on the sort order of
# frozensets.
#
# However, as the docs point out: "Since sets only define
# partial ordering (subset relationships), the output of the
# list.sort() method is undefined for lists of sets."
#
# In a nutshell, the test assumes frozenset({0}) will always
# sort before frozenset({1}), but:
#
# >>> frozenset({0}) < frozenset({1})
# False
# >>> frozenset({1}) < frozenset({0})
# False
#
# Consequently, this test is fragile and
# implementation-dependent. Small changes to Python's sort
# algorithm cause the test to fail when it should pass.
        # XXX Or changes to the dictionary implementation...
cube_repr_tgt = """\
{frozenset(): frozenset({frozenset({2}), frozenset({0}), frozenset({1})}),
frozenset({0}): frozenset({frozenset(),
frozenset({0, 2}),
frozenset({0, 1})}),
frozenset({1}): frozenset({frozenset(),
frozenset({1, 2}),
frozenset({0, 1})}),
frozenset({2}): frozenset({frozenset(),
frozenset({1, 2}),
frozenset({0, 2})}),
frozenset({1, 2}): frozenset({frozenset({2}),
frozenset({1}),
frozenset({0, 1, 2})}),
frozenset({0, 2}): frozenset({frozenset({2}),
frozenset({0}),
frozenset({0, 1, 2})}),
frozenset({0, 1}): frozenset({frozenset({0}),
frozenset({1}),
frozenset({0, 1, 2})}),
frozenset({0, 1, 2}): frozenset({frozenset({1, 2}),
frozenset({0, 2}),
frozenset({0, 1})})}"""
cube = test.test_set.cube(3)
self.assertEqual(pprint.pformat(cube), cube_repr_tgt)
cubo_repr_tgt = """\
{frozenset({frozenset({0, 2}), frozenset({0})}): frozenset({frozenset({frozenset({0,
2}),
frozenset({0,
1,
2})}),
frozenset({frozenset({0}),
frozenset({0,
1})}),
frozenset({frozenset(),
frozenset({0})}),
frozenset({frozenset({2}),
frozenset({0,
2})})}),
frozenset({frozenset({0, 1}), frozenset({1})}): frozenset({frozenset({frozenset({0,
1}),
frozenset({0,
1,
2})}),
frozenset({frozenset({0}),
frozenset({0,
1})}),
frozenset({frozenset({1}),
frozenset({1,
2})}),
frozenset({frozenset(),
frozenset({1})})}),
frozenset({frozenset({1, 2}), frozenset({1})}): frozenset({frozenset({frozenset({1,
2}),
frozenset({0,
1,
2})}),
frozenset({frozenset({2}),
frozenset({1,
2})}),
frozenset({frozenset(),
frozenset({1})}),
frozenset({frozenset({1}),
frozenset({0,
1})})}),
frozenset({frozenset({1, 2}), frozenset({2})}): frozenset({frozenset({frozenset({1,
2}),
frozenset({0,
1,
2})}),
frozenset({frozenset({1}),
frozenset({1,
2})}),
frozenset({frozenset({2}),
frozenset({0,
2})}),
frozenset({frozenset(),
frozenset({2})})}),
frozenset({frozenset(), frozenset({0})}): frozenset({frozenset({frozenset({0}),
frozenset({0,
1})}),
frozenset({frozenset({0}),
frozenset({0,
2})}),
frozenset({frozenset(),
frozenset({1})}),
frozenset({frozenset(),
frozenset({2})})}),
frozenset({frozenset(), frozenset({1})}): frozenset({frozenset({frozenset(),
frozenset({0})}),
frozenset({frozenset({1}),
frozenset({1,
2})}),
frozenset({frozenset(),
frozenset({2})}),
frozenset({frozenset({1}),
frozenset({0,
1})})}),
frozenset({frozenset({2}), frozenset()}): frozenset({frozenset({frozenset({2}),
frozenset({1,
2})}),
frozenset({frozenset(),
frozenset({0})}),
frozenset({frozenset(),
frozenset({1})}),
frozenset({frozenset({2}),
frozenset({0,
2})})}),
frozenset({frozenset({0, 1, 2}), frozenset({0, 1})}): frozenset({frozenset({frozenset({1,
2}),
frozenset({0,
1,
2})}),
frozenset({frozenset({0,
2}),
frozenset({0,
1,
2})}),
frozenset({frozenset({0}),
frozenset({0,
1})}),
frozenset({frozenset({1}),
frozenset({0,
1})})}),
frozenset({frozenset({0}), frozenset({0, 1})}): frozenset({frozenset({frozenset(),
frozenset({0})}),
frozenset({frozenset({0,
1}),
frozenset({0,
1,
2})}),
frozenset({frozenset({0}),
frozenset({0,
2})}),
frozenset({frozenset({1}),
frozenset({0,
1})})}),
frozenset({frozenset({2}), frozenset({0, 2})}): frozenset({frozenset({frozenset({0,
2}),
frozenset({0,
1,
2})}),
frozenset({frozenset({2}),
frozenset({1,
2})}),
frozenset({frozenset({0}),
frozenset({0,
2})}),
frozenset({frozenset(),
frozenset({2})})}),
frozenset({frozenset({0, 1, 2}), frozenset({0, 2})}): frozenset({frozenset({frozenset({1,
2}),
frozenset({0,
1,
2})}),
frozenset({frozenset({0,
1}),
frozenset({0,
1,
2})}),
frozenset({frozenset({0}),
frozenset({0,
2})}),
frozenset({frozenset({2}),
frozenset({0,
2})})}),
frozenset({frozenset({1, 2}), frozenset({0, 1, 2})}): frozenset({frozenset({frozenset({0,
2}),
frozenset({0,
1,
2})}),
frozenset({frozenset({0,
1}),
frozenset({0,
1,
2})}),
frozenset({frozenset({2}),
frozenset({1,
2})}),
frozenset({frozenset({1}),
frozenset({1,
2})})})}"""
cubo = test.test_set.linegraph(cube)
self.assertEqual(pprint.pformat(cubo), cubo_repr_tgt)
def test_depth(self):
nested_tuple = (1, (2, (3, (4, (5, 6)))))
nested_dict = {1: {2: {3: {4: {5: {6: 6}}}}}}
nested_list = [1, [2, [3, [4, [5, [6, []]]]]]]
self.assertEqual(pprint.pformat(nested_tuple), repr(nested_tuple))
self.assertEqual(pprint.pformat(nested_dict), repr(nested_dict))
self.assertEqual(pprint.pformat(nested_list), repr(nested_list))
lv1_tuple = '(1, (...))'
lv1_dict = '{1: {...}}'
lv1_list = '[1, [...]]'
self.assertEqual(pprint.pformat(nested_tuple, depth=1), lv1_tuple)
self.assertEqual(pprint.pformat(nested_dict, depth=1), lv1_dict)
self.assertEqual(pprint.pformat(nested_list, depth=1), lv1_list)
def test_sort_unorderable_values(self):
# Issue 3976: sorted pprints fail for unorderable values.
n = 20
keys = [Unorderable() for i in range(n)]
random.shuffle(keys)
skeys = sorted(keys, key=id)
clean = lambda s: s.replace(' ', '').replace('\n','')
self.assertEqual(clean(pprint.pformat(set(keys))),
'{' + ','.join(map(repr, skeys)) + '}')
self.assertEqual(clean(pprint.pformat(frozenset(keys))),
'frozenset({' + ','.join(map(repr, skeys)) + '})')
self.assertEqual(clean(pprint.pformat(dict.fromkeys(keys))),
'{' + ','.join('%r:None' % k for k in skeys) + '}')
# Issue 10017: TypeError on user-defined types as dict keys.
self.assertEqual(pprint.pformat({Unorderable: 0, 1: 0}),
'{1: 0, ' + repr(Unorderable) +': 0}')
# Issue 14998: TypeError on tuples with NoneTypes as dict keys.
keys = [(1,), (None,)]
self.assertEqual(pprint.pformat(dict.fromkeys(keys, 0)),
'{%r: 0, %r: 0}' % tuple(sorted(keys, key=id)))
def test_str_wrap(self):
# pprint tries to wrap strings intelligently
fox = 'the quick brown fox jumped over a lazy dog'
self.assertEqual(pprint.pformat(fox, width=20), """\
'the quick brown '
'fox jumped over '
'a lazy dog'""")
self.assertEqual(pprint.pformat({'a': 1, 'b': fox, 'c': 2},
width=26), """\
{'a': 1,
'b': 'the quick brown '
'fox jumped over '
'a lazy dog',
'c': 2}""")
# With some special characters
# - \n always triggers a new line in the pprint
# - \t and \n are escaped
# - non-ASCII is allowed
# - an apostrophe doesn't disrupt the pprint
special = "Portons dix bons \"whiskys\"\nà l'avocat goujat\t qui fumait au zoo"
self.assertEqual(pprint.pformat(special, width=20), """\
'Portons dix bons '
'"whiskys"\\n'
"à l'avocat "
'goujat\\t qui '
'fumait au zoo'""")
# An unwrappable string is formatted as its repr
unwrappable = "x" * 100
self.assertEqual(pprint.pformat(unwrappable, width=80), repr(unwrappable))
self.assertEqual(pprint.pformat(''), "''")
# Check that the pprint is a usable repr
special *= 10
for width in range(3, 40):
formatted = pprint.pformat(special, width=width)
self.assertEqual(eval("(" + formatted + ")"), special)
def test_compact(self):
o = ([list(range(i * i)) for i in range(5)] +
[list(range(i)) for i in range(6)])
expected = """\
[[], [0], [0, 1, 2, 3],
[0, 1, 2, 3, 4, 5, 6, 7, 8],
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
14, 15],
[], [0], [0, 1], [0, 1, 2], [0, 1, 2, 3],
[0, 1, 2, 3, 4]]"""
self.assertEqual(pprint.pformat(o, width=48, compact=True), expected)
class DottedPrettyPrinter(pprint.PrettyPrinter):
def format(self, object, context, maxlevels, level):
if isinstance(object, str):
if ' ' in object:
return repr(object), 1, 0
else:
return object, 0, 0
else:
return pprint.PrettyPrinter.format(
self, object, context, maxlevels, level)
def test_main():
test.support.run_unittest(QueryTestCase)
if __name__ == "__main__":
test_main()
| lgpl-3.0 |
MinimalOS/external_skia | platform_tools/android/tests/utils.py | 146 | 1155 | #!/usr/bin/python
# Copyright 2014 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Common code for tests.
"""
import filecmp
import os
EXPECTATIONS_DIR = os.path.join(os.path.dirname(__file__), 'expectations')
def compare_to_expectation(actual_name, expectation_name, assert_true,
msg=None):
"""Check that a generated file matches its expectation in EXPECTATIONS_DIR.
Assert that the generated file and expectation file are identical.
Args:
actual_name: Full path to the test file.
expectation_name: Basename of the expectations file within which
to compare. The file is expected to be in
platform_tools/android/tests/expectations.
assert_true: function for asserting a statement is True
Args:
condition: statement to check for True.
msg: message to print if the files are not equal.
msg: Message to pass to assert_true.
"""
full_expectations_path = os.path.join(EXPECTATIONS_DIR, expectation_name)
assert_true(filecmp.cmp(actual_name, full_expectations_path), msg)
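# Hypothetical usage from a unittest.TestCase (the file names below are made
# up for illustration):
#
#   class ManifestTest(unittest.TestCase):
#       def test_manifest(self):
#           compare_to_expectation('out/AndroidManifest.xml',
#                                  'AndroidManifest.xml',
#                                  self.assertTrue,
#                                  msg='Generated file differs from expectation.')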
| bsd-3-clause |
FlorianLudwig/odoo | addons/account_sequence/account_sequence.py | 338 | 2534 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class account_move(osv.osv):
_inherit = 'account.move'
_columns = {
'internal_sequence_number': fields.char('Internal Number',
readonly=True, copy=False,
help='Internal Sequence Number'),
}
def post(self, cr, uid, ids, context=None):
obj_sequence = self.pool.get('ir.sequence')
res = super(account_move, self).post(cr, uid, ids, context=context)
seq_no = False
for move in self.browse(cr, uid, ids, context=context):
if move.journal_id.internal_sequence_id:
seq_no = obj_sequence.next_by_id(cr, uid, move.journal_id.internal_sequence_id.id, context=context)
if seq_no:
self.write(cr, uid, [move.id], {'internal_sequence_number': seq_no})
return res
class account_journal(osv.osv):
_inherit = "account.journal"
_columns = {
'internal_sequence_id': fields.many2one('ir.sequence', 'Internal Sequence', help="This sequence will be used to maintain the internal number for the journal entries related to this journal."),
}
class account_move_line(osv.osv):
_inherit = "account.move.line"
_columns = {
'internal_sequence_number': fields.related('move_id','internal_sequence_number', type='char', relation='account.move', help='Internal Sequence Number', string='Internal Number'),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
archetipo/account-invoicing | account_invoice_line_sort/models/account_invoice.py | 26 | 4940 | # -*- coding: utf-8 -*-
##############################################################################
# This file is part of account_invoice_line_sort, an Odoo module.
#
# Copyright (c) 2015 ACSONE SA/NV (<http://acsone.eu>)
#
# account_invoice_line_sort is free software: you can redistribute it
# and/or modify it under the terms of the GNU Affero General Public License
# as published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# account_invoice_line_sort is distributed in the hope that it will
# be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the
# GNU Affero General Public License
# along with account_invoice_line_sort.
# If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields, api
from operator import attrgetter
AVAILABLE_SORT_OPTIONS = [
('sequence', 'Sequence'),
('name', 'Description'),
('price_unit', 'Unit Price'),
('price_subtotal', 'Amount'),
]
AVAILABLE_ORDER_OPTIONS = [
('asc', 'Ascending'),
('desc', 'Descending')
]
class account_invoice(models.Model):
_inherit = "account.invoice"
_sort_trigger_fields = ('line_order',
'line_order_direction')
line_order = fields.Selection(AVAILABLE_SORT_OPTIONS,
"Sort Lines By",
default='sequence')
line_order_direction = fields.Selection(AVAILABLE_ORDER_OPTIONS,
"Sort Direction",
default='asc')
@api.model
def get_partner_sort_options(self, partner_id):
res = {}
if partner_id:
p = self.env['res.partner'].browse(partner_id)
res['line_order'] = p.line_order
res['line_order_direction'] = p.line_order_direction
return res
@api.multi
def onchange_partner_id(self, type, partner_id, date_invoice=False,
payment_term=False, partner_bank_id=False,
company_id=False):
res = super(account_invoice,
self).onchange_partner_id(type,
partner_id,
date_invoice=date_invoice,
payment_term=payment_term,
partner_bank_id=partner_bank_id,
company_id=company_id)
if partner_id:
res['value'].update(self.get_partner_sort_options(partner_id))
return res
@api.one
def _sort_account_invoice_line(self):
if self.invoice_line:
sequence = 0
key = attrgetter(self.line_order)
reverse = self.line_order_direction == 'desc'
for line in self.invoice_line.sorted(key=key, reverse=reverse):
sequence += 10
line.sequence = sequence
@api.multi
def write(self, vals):
sort = False
fields = [key for key in vals if key in self._sort_trigger_fields]
if fields:
if [key for key in fields if vals[key] != self[key]]:
sort = True
res = super(account_invoice, self).write(vals)
if sort or 'invoice_line' in vals:
self._sort_account_invoice_line()
return res
@api.model
@api.returns('self', lambda value: value.id)
def create(self, vals):
if not [key for key in vals if key in self._sort_trigger_fields]:
partner_id = vals.get('partner_id', False)
vals.update(self.get_partner_sort_options(partner_id))
invoice = super(account_invoice, self).create(vals)
invoice._sort_account_invoice_line()
return invoice
class account_invoice_line(models.Model):
_inherit = "account.invoice.line"
_sort_trigger_fields = ('name', 'quantity', 'price_unit', 'discount')
@api.multi
def write(self, vals):
sort = False
fields = [key for key in vals if key in self._sort_trigger_fields]
if fields:
if [key for key in fields if vals[key] != self[key]]:
sort = True
res = super(account_invoice_line, self).write(vals)
if sort:
self.invoice_id._sort_account_invoice_line()
return res
@api.model
@api.returns('self', lambda value: value.id)
def create(self, vals):
line = super(account_invoice_line, self).create(vals)
        line.invoice_id._sort_account_invoice_line()
return line
| agpl-3.0 |
lckung/spark-ec2 | launch-script/lib/boto-2.34.0/boto/ec2/zone.py | 152 | 2601 | # Copyright (c) 2006-2008 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Represents an EC2 Availability Zone
"""
from boto.ec2.ec2object import EC2Object
class MessageSet(list):
"""
A list object that contains messages associated with
an availability zone.
"""
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'message':
self.append(value)
else:
setattr(self, name, value)
class Zone(EC2Object):
"""
Represents an Availability Zone.
:ivar name: The name of the zone.
:ivar state: The current state of the zone.
:ivar region_name: The name of the region the zone is associated with.
:ivar messages: A list of messages related to the zone.
"""
def __init__(self, connection=None):
super(Zone, self).__init__(connection)
self.name = None
self.state = None
self.region_name = None
self.messages = None
def __repr__(self):
return 'Zone:%s' % self.name
def startElement(self, name, attrs, connection):
if name == 'messageSet':
self.messages = MessageSet()
return self.messages
return None
def endElement(self, name, value, connection):
if name == 'zoneName':
self.name = value
elif name == 'zoneState':
self.state = value
elif name == 'regionName':
self.region_name = value
else:
setattr(self, name, value)
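# Zone objects are normally obtained through an EC2 connection rather than
# constructed directly; an illustrative lookup (the region name is only an
# example):
#
#   import boto.ec2
#   conn = boto.ec2.connect_to_region('us-east-1')
#   for zone in conn.get_all_zones():
#       print('%s %s' % (zone.name, zone.state))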
| apache-2.0 |
vroyer/elasticassandra | dev-tools/smoke_test_rc.py | 56 | 11737 | # Licensed to Elasticsearch under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on
# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
# Smoke-tests a release candidate
#
# 1. Downloads the tar.gz, deb, RPM and zip file from the staging URL
# 2. Verifies it's sha1 hashes and GPG signatures against the release key
# 3. Installs all official plugins
# 4. Starts one node for tar.gz and zip packages and checks:
# -- if it runs with Java 1.8
# -- if the build hash given is the one that is returned by the status response
# -- if the build is a release version and not a snapshot version
# -- if all plugins are loaded
# -- if the status response returns the correct version
#
# USAGE:
#
# python3 -B ./dev-tools/smoke_test_rc.py --version 2.0.0-beta1 --hash bfa3e47
#
# to also test other plugins try run
#
# python3 -B ./dev-tools/smoke_test_rc.py --version 2.0.0-beta1 --hash bfa3e47 --plugins license,shield,watcher
#
# Note: Ensure the script is run from the elasticsearch top level directory
#
# For testing a release from sonatype try this:
#
# python3 -B dev-tools/smoke_test_rc.py --version 2.0.0-beta1 --hash bfa3e47 --fetch_url https://oss.sonatype.org/content/repositories/releases/
#
import argparse
import tempfile
import os
from os.path import basename, dirname, isdir, join
import signal
import shutil
import urllib
import urllib.request
import hashlib
import time
import socket
import json
import base64
from urllib.parse import urlparse
from http.client import HTTPConnection
def find_official_plugins():
plugins_dir = join(dirname(dirname(__file__)), 'plugins')
plugins = []
for plugin in os.listdir(plugins_dir):
if isdir(join(plugins_dir, plugin)):
plugins.append(plugin)
return plugins
DEFAULT_PLUGINS = find_official_plugins()
try:
JAVA_HOME = os.environ['JAVA_HOME']
except KeyError:
raise RuntimeError("""
Please set JAVA_HOME in the env before running release tool
On OSX use: export JAVA_HOME=`/usr/libexec/java_home -v '1.8*'`""")
# console colors
COLOR_OK = '\033[92m'
COLOR_END = '\033[0m'
def run(command, env_vars=None):
if env_vars:
for key, value in env_vars.items():
os.putenv(key, value)
print('*** Running: %s%s%s' % (COLOR_OK, command, COLOR_END))
if os.system(command):
raise RuntimeError(' FAILED: %s' % (command))
def java_exe():
path = JAVA_HOME
return 'export JAVA_HOME="%s" PATH="%s/bin:$PATH" JAVACMD="%s/bin/java"' % (path, path, path)
def verify_java_version(version):
s = os.popen('%s; java -version 2>&1' % java_exe()).read()
if ' version "%s.' % version not in s:
raise RuntimeError('got wrong version for java %s:\n%s' % (version, s))
def sha1(file):
with open(file, 'rb') as f:
return hashlib.sha1(f.read()).hexdigest()
def read_fully(file):
with open(file, encoding='utf-8') as f:
return f.read()
def wait_for_node_startup(es_dir, timeout=60, header={}):
print(' Waiting until node becomes available for at most %s seconds' % timeout)
for _ in range(timeout):
conn = None
try:
time.sleep(1)
host = get_host_from_ports_file(es_dir)
conn = HTTPConnection(host, timeout=1)
conn.request('GET', '/', headers=header)
res = conn.getresponse()
if res.status == 200:
return True
except IOError as e:
pass
#that is ok it might not be there yet
finally:
if conn:
conn.close()
return False
def download_and_verify(version, hash, files, base_url, plugins=DEFAULT_PLUGINS):
print('Downloading and verifying release %s from %s' % (version, base_url))
tmp_dir = tempfile.mkdtemp()
try:
downloaded_files = []
print(' ' + '*' * 80)
# here we create a temp gpg home where we download the release key as the only key into
# when we verify the signature it will fail if the signed key is not in the keystore and that
# way we keep the executing host unmodified since we don't have to import the key into the default keystore
gpg_home_dir = os.path.join(tmp_dir, "gpg_home_dir")
os.makedirs(gpg_home_dir, 0o700)
run('gpg --homedir %s --keyserver pool.sks-keyservers.net --recv-key D88E42B4' % gpg_home_dir)
for file in files:
name = os.path.basename(file)
print(' Smoketest file: %s' % name)
url = '%s/%s' % (base_url, file)
print(' Downloading %s' % (url))
artifact_path = os.path.join(tmp_dir, file)
downloaded_files.append(artifact_path)
current_artifact_dir = os.path.dirname(artifact_path)
urllib.request.urlretrieve(url, os.path.join(tmp_dir, file))
sha1_url = ''.join([url, '.sha1'])
checksum_file = artifact_path + ".sha1"
print(' Downloading %s' % (sha1_url))
urllib.request.urlretrieve(sha1_url, checksum_file)
print(' Verifying checksum %s' % (checksum_file))
expected = read_fully(checksum_file)
actual = sha1(artifact_path)
if expected != actual :
raise RuntimeError('sha1 hash for %s doesn\'t match %s != %s' % (name, expected, actual))
gpg_url = ''.join([url, '.asc'])
gpg_file = artifact_path + ".asc"
print(' Downloading %s' % (gpg_url))
urllib.request.urlretrieve(gpg_url, gpg_file)
print(' Verifying gpg signature %s' % (gpg_file))
run('cd %s && gpg --homedir %s --verify %s' % (current_artifact_dir, gpg_home_dir, os.path.basename(gpg_file)))
print(' ' + '*' * 80)
print()
smoke_test_release(version, downloaded_files, hash, plugins)
print(' SUCCESS')
finally:
shutil.rmtree(tmp_dir)
def get_host_from_ports_file(es_dir):
return read_fully(os.path.join(es_dir, 'logs/http.ports')).splitlines()[0]
def smoke_test_release(release, files, hash, plugins):
for release_file in files:
if not os.path.isfile(release_file):
raise RuntimeError('Smoketest failed missing file %s' % (release_file))
tmp_dir = tempfile.mkdtemp()
if release_file.endswith('tar.gz'):
run('tar -xzf %s -C %s' % (release_file, tmp_dir))
elif release_file.endswith('zip'):
run('unzip %s -d %s' % (release_file, tmp_dir))
else:
print(' Skip SmokeTest for [%s]' % release_file)
continue # nothing to do here
es_dir = os.path.join(tmp_dir, 'elasticsearch-%s' % (release))
es_run_path = os.path.join(es_dir, 'bin/elasticsearch')
print(' Smoke testing package [%s]' % release_file)
es_plugin_path = os.path.join(es_dir, 'bin/elasticsearch-plugin')
plugin_names = {}
for plugin in plugins:
print(' Install plugin [%s]' % (plugin))
run('%s; export ES_JAVA_OPTS="-Des.plugins.staging=%s"; %s %s %s' % (java_exe(), hash, es_plugin_path, 'install -b', plugin))
plugin_names[plugin] = True
if 'x-pack' in plugin_names:
headers = { 'Authorization' : 'Basic %s' % base64.b64encode(b"es_admin:foobar").decode("UTF-8") }
es_shield_path = os.path.join(es_dir, 'bin/x-pack/users')
print(" Install dummy shield user")
run('%s; %s useradd es_admin -r superuser -p foobar' % (java_exe(), es_shield_path))
else:
headers = {}
print(' Starting elasticsearch daemon from [%s]' % es_dir)
try:
run('%s; %s -Enode.name=smoke_tester -Ecluster.name=prepare_release -Erepositories.url.allowed_urls=http://snapshot.test* %s -Epidfile=%s -Enode.portsfile=true'
% (java_exe(), es_run_path, '-d', os.path.join(es_dir, 'es-smoke.pid')))
if not wait_for_node_startup(es_dir, header=headers):
print("elasticsearch logs:")
print('*' * 80)
logs = read_fully(os.path.join(es_dir, 'logs/prepare_release.log'))
print(logs)
print('*' * 80)
raise RuntimeError('server didn\'t start up')
try: # we now get / and /_nodes to fetch basic infos like hashes etc and the installed plugins
host = get_host_from_ports_file(es_dir)
conn = HTTPConnection(host, timeout=20)
conn.request('GET', '/', headers=headers)
res = conn.getresponse()
if res.status == 200:
version = json.loads(res.read().decode("utf-8"))['version']
if release != version['number']:
raise RuntimeError('Expected version [%s] but was [%s]' % (release, version['number']))
if version['build_snapshot']:
raise RuntimeError('Expected non snapshot version')
print(' Verify if plugins are listed in _nodes')
conn.request('GET', '/_nodes/plugins?pretty=true', headers=headers)
res = conn.getresponse()
if res.status == 200:
nodes = json.loads(res.read().decode("utf-8"))['nodes']
for _, node in nodes.items():
node_plugins = node['plugins']
for node_plugin in node_plugins:
if not plugin_names.get(node_plugin['name'].strip(), False):
raise RuntimeError('Unexpected plugin %s' % node_plugin['name'])
del plugin_names[node_plugin['name']]
if plugin_names:
raise RuntimeError('Plugins not loaded %s' % list(plugin_names.keys()))
else:
raise RuntimeError('Expected HTTP 200 but got %s' % res.status)
else:
raise RuntimeError('Expected HTTP 200 but got %s' % res.status)
finally:
conn.close()
finally:
pid_path = os.path.join(es_dir, 'es-smoke.pid')
if os.path.exists(pid_path): # try reading the pid and kill the node
pid = int(read_fully(pid_path))
os.kill(pid, signal.SIGKILL)
shutil.rmtree(tmp_dir)
print(' ' + '*' * 80)
print()
def parse_list(string):
return [x.strip() for x in string.split(',')]
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='SmokeTests a Release Candidate from S3 staging repo')
parser.add_argument('--version', '-v', dest='version', default=None,
help='The Elasticsearch version to smoke-test', required=True)
parser.add_argument('--hash', '-s', dest='hash', default=None, required=True,
help='The hash of the unified release')
parser.add_argument('--plugins', '-p', dest='plugins', default=[], required=False, type=parse_list,
help='A list of additional plugins to smoketest')
parser.add_argument('--fetch_url', '-u', dest='url', default=None,
help='Fetched from the specified URL')
parser.set_defaults(hash=None)
parser.set_defaults(plugins=[])
parser.set_defaults(version=None)
parser.set_defaults(url=None)
args = parser.parse_args()
plugins = args.plugins
version = args.version
hash = args.hash
url = args.url
files = [ x % {'version': version} for x in [
'elasticsearch-%(version)s.tar.gz',
'elasticsearch-%(version)s.zip',
'elasticsearch-%(version)s.deb',
'elasticsearch-%(version)s.rpm'
]]
verify_java_version('1.8')
if url:
download_url = url
else:
download_url = 'https://staging.elastic.co/%s-%s/downloads/elasticsearch' % (version, hash)
download_and_verify(version, hash, files, download_url, plugins=DEFAULT_PLUGINS + plugins)
| apache-2.0 |
zhxwmessi/or-tools | examples/python/secret_santa.py | 34 | 3832 | # Copyright 2010 Hakan Kjellerstrand [email protected]
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Secret Santa problem in Google CP Solver.
From Ruby Quiz Secret Santa
http://www.rubyquiz.com/quiz2.html
'''
Honoring a long standing tradition started by my wife's dad, my friends
all play a Secret Santa game around Christmas time. We draw names and
spend a week sneaking that person gifts and clues to our identity. On the
last night of the game, we get together, have dinner, share stories, and,
most importantly, try to guess who our Secret Santa was. It's a crazily
fun way to enjoy each other's company during the holidays.
To choose Santas, we use to draw names out of a hat. This system was
tedious, prone to many 'Wait, I got myself...' problems. This year, we
made a change to the rules that further complicated picking and we knew
the hat draw would not stand up to the challenge. Naturally, to solve
this problem, I scripted the process. Since that turned out to be more
interesting than I had expected, I decided to share.
This weeks Ruby Quiz is to implement a Secret Santa selection script.
Your script will be fed a list of names on STDIN.
...
Your script should then choose a Secret Santa for every name in the list.
Obviously, a person cannot be their own Secret Santa. In addition, my friends
no longer allow people in the same family to be Santas for each other and your
script should take this into account.
'''
Comment: This model skips the file input and mail parts. We
assume that the friends are identified with a number from 1..n,
and the families are identified with a number 1..num_families.
Compare with the following model:
* MiniZinc: http://www.hakank.org/minizinc/secret_santa.mzn
This model gives 4089600 solutions and the following statistics:
- failures: 31264
- branches: 8241726
- WallTime: 23735 ms (note: without any printing of the solutions)
This model was created by Hakan Kjellerstrand ([email protected])
Also see my other Google CP Solver models:
http://www.hakank.org/google_or_tools/
"""
import sys
from ortools.constraint_solver import pywrapcp
def main():
# Create the solver.
solver = pywrapcp.Solver('Secret Santa problem')
#
# data
#
family = [1, 1, 1, 1, 2, 3, 3, 3, 3, 3, 4, 4]
num_families = max(family)
n = len(family)
#
# declare variables
#
x = [solver.IntVar(0, n - 1, 'x[%i]' % i) for i in range(n)]
#
# constraints
#
solver.Add(solver.AllDifferent(x))
# Can't be one's own Secret Santa
# Ensure that there are no fixed points in the array
for i in range(n):
solver.Add(x[i] != i)
# No Secret Santa to a person in the same family
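# family[i] is person i's family, while Element(family, x[i]) looks up the
# family of the person i gives to, so the two must differ.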
for i in range(n):
solver.Add(family[i] != solver.Element(family, x[i]))
#
# solution and search
#
db = solver.Phase(x,
solver.INT_VAR_SIMPLE,
solver.INT_VALUE_SIMPLE)
solver.NewSearch(db)
num_solutions = 0
while solver.NextSolution():
num_solutions += 1
print 'x:', [x[i].Value() for i in range(n)]
print
print 'num_solutions:', num_solutions
print 'failures:', solver.Failures()
print 'branches:', solver.Branches()
print 'WallTime:', solver.WallTime(), 'ms'
if __name__ == '__main__':
main()
| apache-2.0 |
drawks/ansible | lib/ansible/modules/network/f5/bigip_firewall_rule.py | 14 | 42629 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_firewall_rule
short_description: Manage AFM Firewall rules
description:
- Manages firewall rules in an AFM firewall policy. New rules will always be added to the
end of the policy. Rules can be re-ordered using the C(bigip_security_policy) module.
Rules can also be pre-ordered using the C(bigip_security_policy) module and then later
updated using the C(bigip_firewall_rule) module.
version_added: 2.7
options:
name:
description:
- Specifies the name of the rule.
type: str
required: True
parent_policy:
description:
- The policy which contains the rule to be managed.
- One of either C(parent_policy) or C(parent_rule_list) is required.
type: str
parent_rule_list:
description:
- The rule list which contains the rule to be managed.
- One of either C(parent_policy) or C(parent_rule_list) is required.
type: str
action:
description:
- Specifies the action for the firewall rule.
- When C(accept), allows packets with the specified source, destination,
and protocol to pass through the firewall. Packets that match the rule,
and are accepted, traverse the system as if the firewall is not present.
- When C(drop), drops packets with the specified source, destination, and
protocol. Dropping a packet is a silent action with no notification to
the source or destination systems. Dropping the packet causes the connection
to be retried until the retry threshold is reached.
- When C(reject), rejects packets with the specified source, destination,
and protocol. When a packet is rejected the firewall sends a destination
unreachable message to the sender.
- When C(accept-decisively), allows packets with the specified source,
destination, and protocol to pass through the firewall, and does not require
any further processing by any of the further firewalls. Packets that match
the rule, and are accepted, traverse the system as if the firewall is not
present. If the Rule List is applied to a virtual server, management IP,
or self IP firewall rule, then Accept Decisively is equivalent to Accept.
- When creating a new rule, if this parameter is not provided, the default is
C(reject).
type: str
choices:
- accept
- drop
- reject
- accept-decisively
status:
description:
- Indicates the activity state of the rule or rule list.
- When C(disabled), specifies that the rule or rule list does not apply at all.
- When C(enabled), specifies that the system applies the firewall rule or rule
list to the given context and addresses.
- When C(scheduled), specifies that the system applies the rule or rule list
according to the specified schedule.
- When creating a new rule, if this parameter is not provided, the default
is C(enabled).
type: str
choices:
- enabled
- disabled
- scheduled
schedule:
description:
- Specifies a schedule for the firewall rule.
- You configure schedules to define days and times when the firewall rule is
made active.
type: str
description:
description:
- The rule description.
type: str
irule:
description:
- Specifies an iRule that is applied to the firewall rule.
- An iRule can be started when the firewall rule matches traffic.
type: str
protocol:
description:
- Specifies the protocol to which the rule applies.
- Protocols may be specified by either their name or numeric value.
- A special protocol value C(any) can be specified to match any protocol. The
numeric equivalent of this protocol is C(255).
type: str
source:
description:
- Specifies packet sources to which the rule applies.
- Leaving this field blank applies the rule to all addresses and all ports.
- You can specify the following source items. An IPv4 or IPv6 address, an IPv4
or IPv6 address range, geographic location, VLAN, address list, port,
port range, port list or address list.
- You can specify a mix of different types of items for the source address.
suboptions:
address:
description:
- Specifies a specific IP address.
type: str
address_list:
description:
- Specifies an existing address list.
type: str
address_range:
description:
- Specifies an address range.
type: str
country:
description:
- Specifies a country code.
type: str
port:
description:
- Specifies a single numeric port.
- This option is only valid when C(protocol) is C(tcp)(6) or C(udp)(17).
type: int
port_list:
description:
- Specifies an existing port list.
- This option is only valid when C(protocol) is C(tcp)(6) or C(udp)(17).
type: str
port_range:
description:
- Specifies a range of ports, which is two port values separated by
a hyphen. The port to the left of the hyphen should be less than the
port to the right.
- This option is only valid when C(protocol) is C(tcp)(6) or C(udp)(17).
type: str
vlan:
description:
- Specifies VLANs to which the rule applies.
- The VLAN source refers to the packet's source.
type: str
type: list
destination:
description:
- Specifies packet destinations to which the rule applies.
- Leaving this field blank applies the rule to all addresses and all ports.
- You can specify the following destination items. An IPv4 or IPv6 address,
an IPv4 or IPv6 address range, geographic location, VLAN, address list, port,
port range, port list or address list.
- You can specify a mix of different types of items for the source address.
suboptions:
address:
description:
- Specifies a specific IP address.
type: str
address_list:
description:
- Specifies an existing address list.
type: str
address_range:
description:
- Specifies an address range.
type: str
country:
description:
- Specifies a country code.
type: str
port:
description:
- Specifies a single numeric port.
- This option is only valid when C(protocol) is C(tcp)(6) or C(udp)(17).
type: int
port_list:
description:
- Specifies an existing port list.
- This option is only valid when C(protocol) is C(tcp)(6) or C(udp)(17).
type: str
port_range:
description:
- Specifies a range of ports, which is two port values separated by
a hyphen. The port to the left of the hyphen should be less than the
port to the right.
- This option is only valid when C(protocol) is C(tcp)(6) or C(udp)(17).
type: str
type: list
logging:
description:
- Specifies whether logging is enabled or disabled for the firewall rule.
- When creating a new rule, if this parameter is not specified, the default
is C(no).
type: bool
rule_list:
description:
- Specifies an existing rule list to use in the rule.
- This parameter is mutually exclusive with many of the other individual-rule
specific settings. This includes C(logging), C(action), C(source),
C(destination), C(irule) and C(protocol).
type: str
icmp_message:
description:
- Specifies the Internet Control Message Protocol (ICMP) or ICMPv6 message
C(type) and C(code) that the rule uses.
- This parameter is only relevant when C(protocol) is either C(icmp)(1) or
C(icmpv6)(58).
suboptions:
type:
description:
- Specifies the type of ICMP message.
- You can specify control messages, such as Echo Reply (0) and Destination
Unreachable (3), or you can specify C(any) to indicate that the system
applies the rule for all ICMP messages.
- You can also specify an arbitrary ICMP message.
- The ICMP protocol contains definitions for the existing message type and
number pairs.
type: str
code:
description:
- Specifies the code returned in response to the specified ICMP message type.
- You can specify codes, each set appropriate to the associated type, such
as No Code (0) (associated with Echo Reply (0)) and Host Unreachable (1)
(associated with Destination Unreachable (3)), or you can specify C(any)
to indicate that the system applies the rule for all codes in response to
that specific ICMP message.
- You can also specify an arbitrary code.
- The ICMP protocol contains definitions for the existing message code and
number pairs.
type: str
type: list
partition:
description:
- Device partition to manage resources on.
type: str
default: Common
state:
description:
- When C(state) is C(present), ensures that the rule exists.
- When C(state) is C(absent), ensures that the rule is removed.
type: str
choices:
- present
- absent
default: present
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = r'''
- name: Create a new rule in the foo firewall policy
bigip_firewall_rule:
name: foo
parent_policy: policy1
protocol: tcp
source:
- address: 1.2.3.4
- address: "::1"
- address_list: foo-list1
- address_range: 1.1.1.1-2.2.2.2
- vlan: vlan1
- country: US
- port: 22
- port_list: port-list1
- port_range: 80-443
destination:
- address: 1.2.3.4
- address: "::1"
- address_list: foo-list1
- address_range: 1.1.1.1-2.2.2.2
- country: US
- port: 22
- port_list: port-list1
- port_range: 80-443
irule: irule1
action: accept
logging: yes
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
- name: Create an ICMP specific rule
bigip_firewall_rule:
name: foo
protocol: icmp
icmp_message:
type: 0
source:
- country: US
action: drop
logging: yes
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
- name: Add a new rule that is uses an existing rule list
bigip_firewall_rule:
name: foo
rule_list: rule-list1
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
'''
RETURN = r'''
name:
description: Name of the rule.
returned: changed
type: str
sample: FooRule
parent_policy:
description: The policy which contains the rule to be managed.
returned: changed
type: str
sample: FooPolicy
parent_rule_list:
description: The rule list which contains the rule to be managed.
returned: changed
type: str
sample: FooRuleList
action:
description: The action for the firewall rule.
returned: changed
type: str
sample: drop
status:
description: The activity state of the rule or rule list.
returned: changed
type: str
sample: scheduled
schedule:
description: The schedule for the firewall rule.
returned: changed
type: str
sample: Foo_schedule
description:
description: The rule description.
returned: changed
type: str
sample: MyRule
irule:
description: The iRule that is applied to the firewall rule.
returned: changed
type: str
sample: _sys_auth_radius
protocol:
description: The protocol to which the rule applies.
returned: changed
type: str
sample: any
source:
description: The packet sources to which the rule applies.
returned: changed
type: complex
contains:
address:
description: A specific IP address.
returned: changed
type: str
sample: 192.168.1.1
address_list:
description: An existing address list.
returned: changed
type: str
sample: foo-list1
address_range:
description: The address range.
returned: changed
type: str
sample: 1.1.1.1-2.2.2.2
country:
description: A country code.
returned: changed
type: str
sample: US
port:
description: Single numeric port.
returned: changed
type: int
sample: 8080
port_list:
description: An existing port list.
returned: changed
type: str
sample: port-list1
port_range:
description: The port range.
returned: changed
type: str
sample: 80-443
vlan:
description: Source VLANs for the packets.
returned: changed
type: str
sample: vlan1
sample: hash/dictionary of values
destination:
description: The packet destinations to which the rule applies.
returned: changed
type: complex
contains:
address:
description: A specific IP address.
returned: changed
type: str
sample: 192.168.1.1
address_list:
description: An existing address list.
returned: changed
type: str
sample: foo-list1
address_range:
description: The address range.
returned: changed
type: str
sample: 1.1.1.1-2.2.2.2
country:
description: A country code.
returned: changed
type: str
sample: US
port:
description: Single numeric port.
returned: changed
type: int
sample: 8080
port_list:
description: An existing port list.
returned: changed
type: str
sample: port-list1
port_range:
description: The port range.
returned: changed
type: str
sample: 80-443
sample: hash/dictionary of values
logging:
description: Enable or Disable logging for the firewall rule.
returned: changed
type: bool
sample: yes
rule_list:
description: An existing rule list to use in the rule.
returned: changed
type: str
sample: rule-list-1
icmp_message:
description: The (ICMP) or ICMPv6 message C(type) and C(code) that the rule uses.
returned: changed
type: complex
contains:
type:
description: The type of ICMP message.
returned: changed
type: str
sample: 0
code:
description: The code returned in response to the specified ICMP message type.
returned: changed
type: str
sample: 1
sample: hash/dictionary of values
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import fq_name
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import transform_name
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import fq_name
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import transform_name
class Parameters(AnsibleF5Parameters):
api_map = {
'ipProtocol': 'protocol',
'log': 'logging',
'icmp': 'icmp_message',
}
api_attributes = [
'irule',
'ipProtocol',
'log',
'schedule',
'status',
'destination',
'source',
'icmp',
'action',
'description',
]
returnables = [
'logging',
'protocol',
'irule',
'source',
'destination',
'action',
'status',
'schedule',
'description',
'icmp_message',
]
updatables = [
'logging',
'protocol',
'irule',
'source',
'destination',
'action',
'status',
'schedule',
'description',
'icmp_message',
]
protocol_map = {
'1': 'icmp',
'6': 'tcp',
'17': 'udp',
'58': 'icmpv6',
'255': 'any',
}
class ApiParameters(Parameters):
@property
def logging(self):
if self._values['logging'] is None:
return None
if self._values['logging'] == 'yes':
return True
return False
@property
def protocol(self):
if self._values['protocol'] is None:
return None
if self._values['protocol'] in self.protocol_map:
return self.protocol_map[self._values['protocol']]
return self._values['protocol']
@property
def source(self):
result = []
if self._values['source'] is None:
return None
v = self._values['source']
if 'addressLists' in v:
result += [('address_list', x) for x in v['addressLists']]
if 'vlans' in v:
result += [('vlan', x) for x in v['vlans']]
if 'geo' in v:
result += [('geo', x['name']) for x in v['geo']]
if 'addresses' in v:
result += [('address', x['name']) for x in v['addresses']]
if 'ports' in v:
result += [('port', str(x['name'])) for x in v['ports']]
if 'portLists' in v:
result += [('port_list', x) for x in v['portLists']]
if result:
return result
return None
@property
def destination(self):
result = []
if self._values['destination'] is None:
return None
v = self._values['destination']
if 'addressLists' in v:
result += [('address_list', x) for x in v['addressLists']]
if 'geo' in v:
result += [('geo', x['name']) for x in v['geo']]
if 'addresses' in v:
result += [('address', x['name']) for x in v['addresses']]
if 'ports' in v:
result += [('port', x['name']) for x in v['ports']]
if 'portLists' in v:
result += [('port_list', x) for x in v['portLists']]
if result:
return result
return None
@property
def icmp_message(self):
if self._values['icmp_message'] is None:
return None
result = [x['name'] for x in self._values['icmp_message']]
return result
class ModuleParameters(Parameters):
@property
def irule(self):
if self._values['irule'] is None:
return None
if self._values['irule'] == '':
return ''
return fq_name(self.partition, self._values['irule'])
@property
def description(self):
if self._values['description'] is None:
return None
if self._values['description'] == '':
return ''
return self._values['description']
@property
def schedule(self):
if self._values['schedule'] is None:
return None
if self._values['schedule'] == '':
return ''
return fq_name(self.partition, self._values['schedule'])
@property
def source(self):
result = []
if self._values['source'] is None:
return None
for x in self._values['source']:
if 'address' in x and x['address'] is not None:
result += [('address', x['address'])]
elif 'address_range' in x and x['address_range'] is not None:
result += [('address', x['address_range'])]
elif 'address_list' in x and x['address_list'] is not None:
result += [('address_list', x['address_list'])]
elif 'country' in x and x['country'] is not None:
result += [('geo', x['country'])]
elif 'vlan' in x and x['vlan'] is not None:
result += [('vlan', fq_name(self.partition, x['vlan']))]
elif 'port' in x and x['port'] is not None:
result += [('port', str(x['port']))]
elif 'port_range' in x and x['port_range'] is not None:
result += [('port', x['port_range'])]
elif 'port_list' in x and x['port_list'] is not None:
result += [('port_list', fq_name(self.partition, x['port_list']))]
if result:
return result
return None
@property
def destination(self):
result = []
if self._values['destination'] is None:
return None
for x in self._values['destination']:
if 'address' in x and x['address'] is not None:
result += [('address', x['address'])]
elif 'address_range' in x and x['address_range'] is not None:
result += [('address', x['address_range'])]
elif 'address_list' in x and x['address_list'] is not None:
result += [('address_list', x['address_list'])]
elif 'country' in x and x['country'] is not None:
result += [('geo', x['country'])]
elif 'port' in x and x['port'] is not None:
result += [('port', str(x['port']))]
elif 'port_range' in x and x['port_range'] is not None:
result += [('port', x['port_range'])]
elif 'port_list' in x and x['port_list'] is not None:
result += [('port_list', fq_name(self.partition, x['port_list']))]
if result:
return result
return None
@property
def icmp_message(self):
if self._values['icmp_message'] is None:
return None
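# Normalize each type/code pair into the API's 'type:code' string form:
# 'any' or a missing value maps to the wildcard 255, and a wildcard code
# collapses the pair down to just the type.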
result = []
for x in self._values['icmp_message']:
type = x.get('type', '255')
code = x.get('code', '255')
if type is None or type == 'any':
type = '255'
if code is None or code == 'any':
code = '255'
if type == '255' and code == '255':
result.append("255")
elif type == '255' and code != '255':
raise F5ModuleError(
"A type of 'any' (255) requires a code of 'any'."
)
elif code == '255':
result.append(type)
else:
result.append('{0}:{1}'.format(type, code))
result = list(set(result))
return result
class Changes(Parameters):
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
pass
return result
class UsableChanges(Changes):
@property
def logging(self):
if self._values['logging'] is None:
return None
if self._values['logging'] is True:
return "yes"
return "no"
@property
def source(self):
if self._values['source'] is None:
return None
result = dict(
addresses=[],
addressLists=[],
vlans=[],
geo=[],
ports=[],
portLists=[]
)
for x in self._values['source']:
if x[0] == 'address':
result['addresses'].append({'name': x[1]})
elif x[0] == 'address_list':
result['addressLists'].append(x[1])
elif x[0] == 'vlan':
result['vlans'].append(x[1])
elif x[0] == 'geo':
result['geo'].append({'name': x[1]})
elif x[0] == 'port':
result['ports'].append({'name': str(x[1])})
elif x[0] == 'port_list':
result['portLists'].append(x[1])
return result
@property
def destination(self):
if self._values['destination'] is None:
return None
result = dict(
addresses=[],
addressLists=[],
vlans=[],
geo=[],
ports=[],
portLists=[]
)
for x in self._values['destination']:
if x[0] == 'address':
result['addresses'].append({'name': x[1]})
elif x[0] == 'address_list':
result['addressLists'].append(x[1])
elif x[0] == 'geo':
result['geo'].append({'name': x[1]})
elif x[0] == 'port':
result['ports'].append({'name': str(x[1])})
elif x[0] == 'port_list':
result['portLists'].append(x[1])
return result
@property
def icmp_message(self):
if self._values['icmp_message'] is None:
return None
result = []
for x in self._values['icmp_message']:
result.append({'name': x})
return result
class ReportableChanges(Changes):
@property
def source(self):
if self._values['source'] is None:
return None
result = []
v = self._values['source']
if v['addressLists']:
result += [('address_list', x) for x in v['addressLists']]
if v['vlans']:
result += [('vlan', x) for x in v['vlans']]
if v['geo']:
result += [('geo', x['name']) for x in v['geo']]
if v['addresses']:
result += [('address', x['name']) for x in v['addresses']]
if v['ports']:
result += [('port', str(x)) for x in v['ports']]
if v['portLists']:
result += [('port_list', x['name']) for x in v['portLists']]
if result:
return dict(result)
return None
@property
def destination(self):
if self._values['destination'] is None:
return None
result = []
v = self._values['destination']
if v['addressLists']:
result += [('address_list', x) for x in v['addressLists']]
if v['geo']:
result += [('geo', x['name']) for x in v['geo']]
if v['addresses']:
result += [('address', x['name']) for x in v['addresses']]
if v['ports']:
result += [('port', str(x)) for x in v['ports']]
if v['portLists']:
result += [('port_list', x['name']) for x in v['portLists']]
if result:
return dict(result)
return None
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
return self.__default(param)
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
@property
def irule(self):
if self.want.irule is None:
return None
if self.have.irule is None and self.want.irule == '':
return None
if self.have.irule is None:
return self.want.irule
if self.want.irule != self.have.irule:
return self.want.irule
@property
def description(self):
if self.want.description is None:
return None
if self.have.description is None and self.want.description == '':
return None
if self.have.description is None:
return self.want.description
if self.want.description != self.have.description:
return self.want.description
@property
def source(self):
if self.want.source is None:
return None
if self.want.source is None and self.have.source is None:
return None
if self.have.source is None:
return self.want.source
if set(self.want.source) != set(self.have.source):
return self.want.source
@property
def destination(self):
if self.want.destination is None:
return None
if self.want.destination is None and self.have.destination is None:
return None
if self.have.destination is None:
return self.want.destination
if set(self.want.destination) != set(self.have.destination):
return self.want.destination
@property
def icmp_message(self):
if self.want.icmp_message is None:
return None
if self.want.icmp_message is None and self.have.icmp_message is None:
return None
if self.have.icmp_message is None:
return self.want.icmp_message
if set(self.want.icmp_message) != set(self.have.icmp_message):
return self.want.icmp_message
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = F5RestClient(**self.module.params)
self.want = ModuleParameters(params=self.module.params)
self.have = ApiParameters()
self.changes = UsableChanges()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = UsableChanges(params=changed)
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
if isinstance(change, dict):
changed.update(change)
else:
changed[k] = change
if changed:
self.changes = UsableChanges(params=changed)
return True
return False
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def exec_module(self):
changed = False
result = dict()
state = self.want.state
if state == "present":
changed = self.present()
elif state == "absent":
changed = self.absent()
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
return result
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.client.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def present(self):
if self.exists():
return self.update()
else:
return self.create()
def exists(self):
if self.want.parent_policy:
uri = "https://{0}:{1}/mgmt/tm/security/firewall/policy/{2}/rules/{3}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.parent_policy),
self.want.name
)
else:
uri = "https://{0}:{1}/mgmt/tm/security/firewall/rule-list/{2}/rules/{3}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.parent_rule_list),
self.want.name
)
resp = self.client.api.get(uri)
if resp.ok:
return True
return False
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.module.check_mode:
return True
self.update_on_device()
return True
def remove(self):
if self.module.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the resource.")
return True
def create(self):
self._set_changed_options()
if self.want.rule_list is None and self.want.parent_rule_list is None:
if self.want.action is None:
self.changes.update({'action': 'reject'})
if self.want.logging is None:
self.changes.update({'logging': False})
if self.want.status is None:
self.changes.update({'status': 'enabled'})
if self.want.status == 'scheduled' and self.want.schedule is None:
raise F5ModuleError(
"A 'schedule' must be specified when 'status' is 'scheduled'."
)
if self.module.check_mode:
return True
self.create_on_device()
return True
def create_on_device(self):
params = self.changes.api_params()
params['name'] = self.want.name
params['partition'] = self.want.partition
params['placeAfter'] = 'last'
if self.want.parent_policy:
uri = "https://{0}:{1}/mgmt/tm/security/firewall/policy/{2}/rules/".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.parent_policy),
)
else:
uri = "https://{0}:{1}/mgmt/tm/security/firewall/rule-list/{2}/rules/".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.parent_rule_list),
)
if self.changes.protocol not in ['icmp', 'icmpv6']:
if self.changes.icmp_message is not None:
raise F5ModuleError(
"The 'icmp_message' can only be specified when 'protocol' is 'icmp' or 'icmpv6'."
)
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] in [400, 403]:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def update_on_device(self):
if self.want.parent_policy:
uri = "https://{0}:{1}/mgmt/tm/security/firewall/policy/{2}/rules/{3}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.parent_policy),
self.want.name
)
else:
uri = "https://{0}:{1}/mgmt/tm/security/firewall/rule-list/{2}/rules/{3}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.parent_rule_list),
self.want.name
)
if self.have.protocol not in ['icmp', 'icmpv6'] and self.changes.protocol not in ['icmp', 'icmpv6']:
if self.changes.icmp_message is not None:
raise F5ModuleError(
"The 'icmp_message' can only be specified when 'protocol' is 'icmp' or 'icmpv6'."
)
if self.changes.protocol in ['icmp', 'icmpv6']:
self.changes.update({'source': {}})
self.changes.update({'destination': {}})
params = self.changes.api_params()
resp = self.client.api.patch(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def absent(self):
if self.exists():
return self.remove()
return False
def remove_from_device(self):
if self.want.parent_policy:
uri = "https://{0}:{1}/mgmt/tm/security/firewall/policy/{2}/rules/{3}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.parent_policy),
self.want.name
)
else:
uri = "https://{0}:{1}/mgmt/tm/security/firewall/rule-list/{2}/rules/{3}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.parent_rule_list),
self.want.name
)
resp = self.client.api.delete(uri)
if resp.status == 200:
return True
def read_current_from_device(self):
if self.want.parent_policy:
uri = "https://{0}:{1}/mgmt/tm/security/firewall/policy/{2}/rules/{3}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.parent_policy),
self.want.name
)
else:
uri = "https://{0}:{1}/mgmt/tm/security/firewall/rule-list/{2}/rules/{3}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.parent_rule_list),
self.want.name
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return ApiParameters(params=response)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
name=dict(required=True),
parent_policy=dict(),
parent_rule_list=dict(),
logging=dict(type='bool'),
protocol=dict(),
irule=dict(),
description=dict(),
source=dict(
type='list',
elements='dict',
options=dict(
address=dict(),
address_list=dict(),
address_range=dict(),
country=dict(),
port=dict(type='int'),
port_list=dict(),
port_range=dict(),
vlan=dict(),
),
mutually_exclusive=[[
'address', 'address_list', 'address_range', 'country', 'vlan',
'port', 'port_range', 'port_list'
]]
),
destination=dict(
type='list',
elements='dict',
options=dict(
address=dict(),
address_list=dict(),
address_range=dict(),
country=dict(),
port=dict(type='int'),
port_list=dict(),
port_range=dict(),
),
mutually_exclusive=[[
'address', 'address_list', 'address_range', 'country',
'port', 'port_range', 'port_list'
]]
),
action=dict(
choices=['accept', 'drop', 'reject', 'accept-decisively']
),
status=dict(
choices=['enabled', 'disabled', 'scheduled']
),
schedule=dict(),
rule_list=dict(),
icmp_message=dict(
type='list',
elements='dict',
options=dict(
type=dict(),
code=dict(),
)
),
partition=dict(
default='Common',
fallback=(env_fallback, ['F5_PARTITION'])
),
state=dict(
default='present',
choices=['present', 'absent']
)
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
self.mutually_exclusive = [
['rule_list', 'action'],
['rule_list', 'source'],
['rule_list', 'destination'],
['rule_list', 'irule'],
['rule_list', 'protocol'],
['rule_list', 'logging'],
['parent_policy', 'parent_rule_list']
]
self.required_one_of = [
['parent_policy', 'parent_rule_list']
]
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode,
mutually_exclusive=spec.mutually_exclusive,
required_one_of=spec.required_one_of
)
try:
mm = ModuleManager(module=module)
results = mm.exec_module()
module.exit_json(**results)
except F5ModuleError as ex:
module.fail_json(msg=str(ex))
if __name__ == '__main__':
main()
| gpl-3.0 |
ArchiveTeam/spuf-grab | pipeline.py | 1 | 11245 | # encoding=utf8
import datetime
from distutils.version import StrictVersion
import hashlib
import os.path
import random
from seesaw.config import realize, NumberConfigValue
from seesaw.externalprocess import ExternalProcess
from seesaw.item import ItemInterpolation, ItemValue
from seesaw.task import SimpleTask, LimitConcurrent
from seesaw.tracker import GetItemFromTracker, PrepareStatsForTracker, \
UploadWithTracker, SendDoneToTracker
import shutil
import socket
import subprocess
import sys
import time
import string
import seesaw
from seesaw.externalprocess import WgetDownload
from seesaw.pipeline import Pipeline
from seesaw.project import Project
from seesaw.util import find_executable
# check the seesaw version
if StrictVersion(seesaw.__version__) < StrictVersion("0.8.5"):
raise Exception("This pipeline needs seesaw version 0.8.5 or higher.")
###########################################################################
# Find a useful Wget+Lua executable.
#
# WGET_LUA will be set to the first path that
# 1. does not crash with --version, and
# 2. prints the required version string
WGET_LUA = find_executable(
"Wget+Lua",
["GNU Wget 1.14.lua.20130523-9a5c", "GNU Wget 1.14.lua.20160530-955376b"],
[
"./wget-lua",
"./wget-lua-warrior",
"./wget-lua-local",
"../wget-lua",
"../../wget-lua",
"/home/warrior/wget-lua",
"/usr/bin/wget-lua"
]
)
if not WGET_LUA:
raise Exception("No usable Wget+Lua found.")
###########################################################################
# The version number of this pipeline definition.
#
# Update this each time you make a non-cosmetic change.
# It will be added to the WARC files and reported to the tracker.
VERSION = "20170615.01"
USER_AGENT = 'ArchiveTeam'
TRACKER_ID = 'spuf'
TRACKER_HOST = 'tracker.archiveteam.org'
###########################################################################
# This section defines project-specific tasks.
#
# Simple tasks (tasks that do not need any concurrency) are based on the
# SimpleTask class and have a process(item) method that is called for
# each item.
class CheckIP(SimpleTask):
def __init__(self):
SimpleTask.__init__(self, "CheckIP")
self._counter = 0
def process(self, item):
# NEW for 2014! Check if we are behind firewall/proxy
if self._counter <= 0:
item.log_output('Checking IP address.')
ip_set = set()
ip_set.add(socket.gethostbyname('twitter.com'))
ip_set.add(socket.gethostbyname('facebook.com'))
ip_set.add(socket.gethostbyname('youtube.com'))
ip_set.add(socket.gethostbyname('microsoft.com'))
ip_set.add(socket.gethostbyname('icanhas.cheezburger.com'))
ip_set.add(socket.gethostbyname('archiveteam.org'))
if len(ip_set) != 6:
item.log_output('Got IP addresses: {0}'.format(ip_set))
item.log_output(
'Are you behind a firewall/proxy? That is a big no-no!')
raise Exception(
'Are you behind a firewall/proxy? That is a big no-no!')
# Check only occasionally
if self._counter <= 0:
self._counter = 10
else:
self._counter -= 1
class PrepareDirectories(SimpleTask):
def __init__(self, warc_prefix):
SimpleTask.__init__(self, "PrepareDirectories")
self.warc_prefix = warc_prefix
def process(self, item):
item_name = item["item_name"]
escaped_item_name = item_name.replace(':', '_').replace('/', '_').replace('~', '_')
dirname = "/".join((item["data_dir"], escaped_item_name))
if os.path.isdir(dirname):
shutil.rmtree(dirname)
os.makedirs(dirname)
item["item_dir"] = dirname
item["warc_file_base"] = "%s-%s-%s" % (self.warc_prefix, escaped_item_name,
time.strftime("%Y%m%d-%H%M%S"))
open("%(item_dir)s/%(warc_file_base)s.warc.gz" % item, "w").close()
class MoveFiles(SimpleTask):
def __init__(self):
SimpleTask.__init__(self, "MoveFiles")
def process(self, item):
# NEW for 2014! Check if wget was compiled with zlib support
if os.path.exists("%(item_dir)s/%(warc_file_base)s.warc" % item):
raise Exception('Please compile wget with zlib support!')
os.rename("%(item_dir)s/%(warc_file_base)s.warc.gz" % item,
"%(data_dir)s/%(warc_file_base)s.warc.gz" % item)
shutil.rmtree("%(item_dir)s" % item)
def get_hash(filename):
with open(filename, 'rb') as in_file:
return hashlib.sha1(in_file.read()).hexdigest()
CWD = os.getcwd()
PIPELINE_SHA1 = get_hash(os.path.join(CWD, 'pipeline.py'))
LUA_SHA1 = get_hash(os.path.join(CWD, 'spuf.lua'))
def stats_id_function(item):
# NEW for 2014! Some accountability hashes and stats.
d = {
'pipeline_hash': PIPELINE_SHA1,
'lua_hash': LUA_SHA1,
'python_version': sys.version,
}
return d
class WgetArgs(object):
def realize(self, item):
wget_args = [
WGET_LUA,
"-U", USER_AGENT,
"-nv",
"--load-cookies", "cookies.txt",
#"--no-cookies",
"--lua-script", "spuf.lua",
"-o", ItemInterpolation("%(item_dir)s/wget.log"),
"--no-check-certificate",
"--output-document", ItemInterpolation("%(item_dir)s/wget.tmp"),
"--truncate-output",
"-e", "robots=off",
"--rotate-dns",
"--recursive", "--level=inf",
"--no-parent",
"--page-requisites",
"--timeout", "30",
"--tries", "inf",
"--domains", "steampowered.com",
"--span-hosts",
"--waitretry", "30",
"--warc-file", ItemInterpolation("%(item_dir)s/%(warc_file_base)s"),
"--warc-header", "operator: Archive Team",
"--warc-header", "steam-users-forum-dld-script-version: " + VERSION,
"--warc-header", ItemInterpolation("steam-users-forum-item: %(item_name)s"),
]
item_name = item['item_name']
assert ':' in item_name
item_type, item_value = item_name.split(':', 1)
item['item_type'] = item_type
item['item_value'] = item_value
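# Log in to the forum with the shared 'archiveTeam' guest account before
# crawling: wget POSTs the login form and saves the session cookies to
# cookies.txt; the attempt is repeated if the request fails or the response
# is the forum's database-error page.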
tries = 0
while tries < 10:
    tries += 1
if os.path.isfile('login.php?do=login'):
os.remove('login.php?do=login')
os.system("wget --save-cookies cookies.txt --user-agent 'ArchiveTeam' --keep-session-cookies --post-data 'vb_login_username=archiveTeam&cookieuser=1&vb_login_password=&s=&securitytoken=guest&do=login&vb_login_md5password=9aa65d84012ee50e456c4e6916089636&vb_login_md5password_utf=9aa65d84012ee50e456c4e6916089636' --referer http://forums.steampowered.com/forums/ http://forums.steampowered.com/forums/login.php?do=login")
if not os.path.isfile('login.php?do=login'):
continue
with open('login.php?do=login') as f:
if 'alt="Forum Database Error"' in f.read():
continue
break
else:
raise Exception('Could not log in.')
wget_args.append('http://forums.steampowered.com/forums/showthread.php')
if item_type == 'threads':
start, stop = item_value.split('-')
for i in range(int(start), int(stop)+1):
wget_args.extend(['--warc-header', 'steam-users-forum-thread: {i}'.format(i=i)])
wget_args.append('http://forums.steampowered.com/forums/showthread.php?t={i}'.format(i=i))
elif item_type == 'forums':
start, stop = item_value.split('-')
for i in range(int(start), int(stop)+1):
wget_args.extend(['--warc-header', 'steam-users-forum-forum: {i}'.format(i=i)])
wget_args.append('http://forums.steampowered.com/forums/forumdisplay.php?f={i}&daysprune=-1'.format(i=i))
wget_args.append('http://forums.steampowered.com/forums/forumdisplay.php?f={i}'.format(i=i))
elif item_type == 'members':
start, stop = item_value.split('-')
for i in range(int(start), int(stop)+1):
wget_args.extend(['--warc-header', 'steam-users-forum-member: {i}'.format(i=i)])
wget_args.append('http://forums.steampowered.com/forums/member.php?u={i}'.format(i=i))
else:
raise Exception('Unknown item')
if 'bind_address' in globals():
wget_args.extend(['--bind-address', globals()['bind_address']])
print('')
print('*** Wget will bind address at {0} ***'.format(
globals()['bind_address']))
print('')
return realize(wget_args, item)
###########################################################################
# Initialize the project.
#
# This will be shown in the warrior management panel. The logo should not
# be too big. The deadline is optional.
project = Project(
title = "Steam Users' Forum",
project_html = """
<img class="project-logo" alt="Steam Logo" src="http://archiveteam.org/images/thumb/4/48/Steam_Icon_2014.png/100px-Steam_Icon_2014.png" />
<h2>Steam Users' Forum <span class="links"><a href="http://forums.steampowered.com/forums">Website</a> · <a href="http://tracker.archiveteam.org/spuf/">Leaderboard</a></span></h2>
<p>Getting killed June 5th.</p>
""",
utc_deadline = datetime.datetime(2017, 6, 4, 23, 59, 0)
)
pipeline = Pipeline(
CheckIP(),
GetItemFromTracker("http://%s/%s" % (TRACKER_HOST, TRACKER_ID), downloader,
VERSION),
PrepareDirectories(warc_prefix="spuf"),
WgetDownload(
WgetArgs(),
max_tries=2,
accept_on_exit_code=[0, 4, 8],
env={
"item_dir": ItemValue("item_dir"),
"item_value": ItemValue("item_value"),
"item_type": ItemValue("item_type"),
"warc_file_base": ItemValue("warc_file_base"),
}
),
PrepareStatsForTracker(
defaults={"downloader": downloader, "version": VERSION},
file_groups={
"data": [
ItemInterpolation("%(item_dir)s/%(warc_file_base)s.warc.gz")
]
},
id_function=stats_id_function,
),
MoveFiles(),
LimitConcurrent(NumberConfigValue(min=1, max=4, default="1",
name="shared:rsync_threads", title="Rsync threads",
description="The maximum number of concurrent uploads."),
UploadWithTracker(
"http://%s/%s" % (TRACKER_HOST, TRACKER_ID),
downloader=downloader,
version=VERSION,
files=[
ItemInterpolation("%(data_dir)s/%(warc_file_base)s.warc.gz")
],
rsync_target_source_path=ItemInterpolation("%(data_dir)s/"),
rsync_extra_args=[
"--recursive",
"--partial",
"--partial-dir", ".rsync-tmp",
]
),
),
SendDoneToTracker(
tracker_url="http://%s/%s" % (TRACKER_HOST, TRACKER_ID),
stats=ItemValue("stats")
)
)
| unlicense |
fffonion/you-get | src/you_get/extractors/miomio.py | 19 | 1195 | #!/usr/bin/env python
__all__ = ['miomio_download']
from ..common import *
from .sina import sina_download_by_xml
from .tudou import tudou_download_by_id
from .youku import youku_download_by_vid
def miomio_download(url, output_dir = '.', merge = True, info_only = False):
html = get_html(url)
title = r1(r'<meta name="description" content="([^"]*)"', html)
flashvars = r1(r'flashvars="(type=[^"]*)"', html)
t = r1(r'type=(\w+)', flashvars)
id = r1(r'vid=([^"]+)', flashvars)
if t == 'youku':
youku_download_by_vid(id, title=title, output_dir=output_dir, merge=merge, info_only=info_only)
elif t == 'tudou':
tudou_download_by_id(id, title, output_dir=output_dir, merge=merge, info_only=info_only)
elif t == 'sina' or t == 'video':
url = "http://www.miomio.tv/mioplayer/mioplayerconfigfiles/sina.php?vid=" + id
xml = get_content (url, headers=fake_headers, decoded=True)
sina_download_by_xml(xml, title, output_dir=output_dir, merge=merge, info_only=info_only)
else:
raise NotImplementedError(flashvars)
site_info = "MioMio.tv"
download = miomio_download
download_playlist = playlist_not_supported('miomio')
| mit |
m4ns0ur/grumpy | third_party/stdlib/getopt.py | 31 | 7319 | """Parser for command line options.
This module helps scripts to parse the command line arguments in
sys.argv. It supports the same conventions as the Unix getopt()
function (including the special meanings of arguments of the form `-'
and `--'). Long options similar to those supported by GNU software
may be used as well via an optional third argument. This module
provides two functions and an exception:
getopt() -- Parse command line options
gnu_getopt() -- Like getopt(), but allow option and non-option arguments
to be intermixed.
GetoptError -- exception (class) raised with 'opt' attribute, which is the
option involved with the exception.
"""
# Long option support added by Lars Wirzenius <[email protected]>.
#
# Gerrit Holl <[email protected]> moved the string-based exceptions
# to class-based exceptions.
#
# Peter Astrand <[email protected]> added gnu_getopt().
#
# TODO for gnu_getopt():
#
# - GNU getopt_long_only mechanism
# - allow the caller to specify ordering
# - RETURN_IN_ORDER option
# - GNU extension with '-' as first character of option string
# - optional arguments, specified by double colons
# - an option string with a W followed by semicolon should
# treat "-W foo" as "--foo"
__all__ = ["GetoptError","error","getopt","gnu_getopt"]
import os
class GetoptError(Exception):
opt = ''
msg = ''
def __init__(self, msg, opt=''):
self.msg = msg
self.opt = opt
Exception.__init__(self, msg, opt)
def __str__(self):
return self.msg
error = GetoptError # backward compatibility
def getopt(args, shortopts, longopts = []):
"""getopt(args, options[, long_options]) -> opts, args
Parses command line options and parameter list. args is the
argument list to be parsed, without the leading reference to the
running program. Typically, this means "sys.argv[1:]". shortopts
is the string of option letters that the script wants to
recognize, with options that require an argument followed by a
colon (i.e., the same format that Unix getopt() uses). If
specified, longopts is a list of strings with the names of the
long options which should be supported. The leading '--'
characters should not be included in the option name. Options
which require an argument should be followed by an equal sign
('=').
The return value consists of two elements: the first is a list of
(option, value) pairs; the second is the list of program arguments
left after the option list was stripped (this is a trailing slice
of the first argument). Each option-and-value pair returned has
the option as its first element, prefixed with a hyphen (e.g.,
'-x'), and the option argument as its second element, or an empty
string if the option has no argument. The options occur in the
list in the same order in which they were found, thus allowing
multiple occurrences. Long and short options may be mixed.
"""
opts = []
if type(longopts) == type(""):
longopts = [longopts]
else:
longopts = list(longopts)
while args and args[0].startswith('-') and args[0] != '-':
if args[0] == '--':
args = args[1:]
break
if args[0].startswith('--'):
opts, args = do_longs(opts, args[0][2:], longopts, args[1:])
else:
opts, args = do_shorts(opts, args[0][1:], shortopts, args[1:])
return opts, args
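# Illustrative sketch (not part of the original module): per the docstring
# above, a hypothetical call splits the recognized options from the trailing
# program arguments.
#     opts, args = getopt(['-a', '1', '--beta', 'rest'], 'a:b', ['alpha=', 'beta'])
#     # opts == [('-a', '1'), ('--beta', '')]
#     # args == ['rest']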
def gnu_getopt(args, shortopts, longopts = []):
"""getopt(args, options[, long_options]) -> opts, args
This function works like getopt(), except that GNU style scanning
mode is used by default. This means that option and non-option
arguments may be intermixed. The getopt() function stops
processing options as soon as a non-option argument is
encountered.
If the first character of the option string is `+', or if the
environment variable POSIXLY_CORRECT is set, then option
processing stops as soon as a non-option argument is encountered.
"""
opts = []
prog_args = []
if isinstance(longopts, str):
longopts = [longopts]
else:
longopts = list(longopts)
# Allow options after non-option arguments?
if shortopts.startswith('+'):
shortopts = shortopts[1:]
all_options_first = True
elif os.environ.get("POSIXLY_CORRECT"):
all_options_first = True
else:
all_options_first = False
while args:
if args[0] == '--':
prog_args += args[1:]
break
if args[0][:2] == '--':
opts, args = do_longs(opts, args[0][2:], longopts, args[1:])
elif args[0][:1] == '-' and args[0] != '-':
opts, args = do_shorts(opts, args[0][1:], shortopts, args[1:])
else:
if all_options_first:
prog_args += args
break
else:
prog_args.append(args[0])
args = args[1:]
return opts, prog_args
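# Illustrative sketch (not part of the original module): unlike getopt(),
# gnu_getopt() keeps scanning past non-option arguments, so in a hypothetical
# call the leading 'x' ends up in prog_args instead of stopping the parse.
#     opts, prog_args = gnu_getopt(['x', '-a', '1'], 'a:')
#     # opts == [('-a', '1')]
#     # prog_args == ['x']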
def do_longs(opts, opt, longopts, args):
try:
i = opt.index('=')
except ValueError:
optarg = None
else:
opt, optarg = opt[:i], opt[i+1:]
has_arg, opt = long_has_args(opt, longopts)
if has_arg:
if optarg is None:
if not args:
raise GetoptError('option --%s requires argument' % opt, opt)
optarg, args = args[0], args[1:]
elif optarg is not None:
raise GetoptError('option --%s must not have an argument' % opt, opt)
opts.append(('--' + opt, optarg or ''))
return opts, args
# Return:
# has_arg?
# full option name
def long_has_args(opt, longopts):
possibilities = [o for o in longopts if o.startswith(opt)]
if not possibilities:
raise GetoptError('option --%s not recognized' % opt, opt)
# Is there an exact match?
if opt in possibilities:
return False, opt
elif opt + '=' in possibilities:
return True, opt
# No exact match, so better be unique.
if len(possibilities) > 1:
# XXX since possibilities contains all valid continuations, might be
# nice to work them into the error msg
raise GetoptError('option --%s not a unique prefix' % opt, opt)
assert len(possibilities) == 1
unique_match = possibilities[0]
has_arg = unique_match.endswith('=')
if has_arg:
unique_match = unique_match[:-1]
return has_arg, unique_match
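# Illustrative sketch (option names invented for this example): prefix
# matching resolves an abbreviated long option to its full name together
# with its argument requirement.
#     long_has_args('al', ['alpha=', 'beta'])    # -> (True, 'alpha')
#     long_has_args('beta', ['alpha=', 'beta'])  # -> (False, 'beta')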
def do_shorts(opts, optstring, shortopts, args):
while optstring != '':
opt, optstring = optstring[0], optstring[1:]
if short_has_arg(opt, shortopts):
if optstring == '':
if not args:
raise GetoptError('option -%s requires argument' % opt,
opt)
optstring, args = args[0], args[1:]
optarg, optstring = optstring, ''
else:
optarg = ''
opts.append(('-' + opt, optarg))
return opts, args
def short_has_arg(opt, shortopts):
for i in range(len(shortopts)):
if opt == shortopts[i] != ':':
return shortopts.startswith(':', i+1)
raise GetoptError('option -%s not recognized' % opt, opt)
if __name__ == '__main__':
import sys
print getopt(sys.argv[1:], "a:b", ["alpha=", "beta"])
| apache-2.0 |
geggo/pyface | pyface/tree/api.py | 1 | 1198 | #------------------------------------------------------------------------------
# Copyright (c) 2005-2011, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in enthought/LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
# Thanks for using Enthought open source!
#
# Author: Enthought, Inc.
# Description: <Enthought pyface package component>
#------------------------------------------------------------------------------
from __future__ import absolute_import
from .node_event import NodeEvent
from .node_monitor import NodeMonitor
from .node_manager import NodeManager
from .node_tree import NodeTree
from .node_tree_model import NodeTreeModel
from .node_type import NodeType
from .trait_dict_node_type import TraitDictNodeType
from .trait_list_node_type import TraitListNodeType
from .tree_model import TreeModel
from traits.etsconfig.api import ETSConfig
if ETSConfig.toolkit == 'wx':
# Tree has not yet been ported to qt
from .tree import Tree
del ETSConfig
| bsd-3-clause |
vqw/frappe | frappe/commands/translate.py | 6 | 2622 | from __future__ import unicode_literals, absolute_import
import click
import frappe
from frappe.commands import pass_context, get_site
# translation
@click.command('build-message-files')
@pass_context
def build_message_files(context):
"Build message files for translation"
import frappe.translate
for site in context.sites:
try:
frappe.init(site=site)
frappe.connect()
frappe.translate.rebuild_all_translation_files()
finally:
frappe.destroy()
@click.command('new-language') #, help="Create lang-code.csv for given app")
@pass_context
@click.argument('lang_code') #, help="Language code eg. en")
@click.argument('app') #, help="App name eg. frappe")
def new_language(context, lang_code, app):
"""Create lang-code.csv for given app"""
import frappe.translate
if not context['sites']:
raise Exception('--site is required')
# init site
frappe.connect(site=context['sites'][0])
frappe.translate.write_translations_file(app, lang_code)
print "File created at ./apps/{app}/{app}/translations/{lang_code}.csv".format(app=app, lang_code=lang_code)
print "You will need to add the language in frappe/geo/languages.json, if you haven't done it already."
@click.command('get-untranslated')
@click.argument('lang')
@click.argument('untranslated_file')
@click.option('--all', default=False, is_flag=True, help='Get all message strings')
@pass_context
def get_untranslated(context, lang, untranslated_file, all=None):
"Get untranslated strings for language"
import frappe.translate
site = get_site(context)
try:
frappe.init(site=site)
frappe.connect()
frappe.translate.get_untranslated(lang, untranslated_file, get_all=all)
finally:
frappe.destroy()
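# Hypothetical invocation sketch (site name and file paths are placeholders,
# not taken from this module); frappe commands like this one are normally run
# through the bench CLI, e.g.:
#     bench --site mysite.local get-untranslated de /tmp/untranslated_de.csv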
@click.command('update-translations')
@click.argument('lang')
@click.argument('untranslated_file')
@click.argument('translated-file')
@pass_context
def update_translations(context, lang, untranslated_file, translated_file):
"Update translated strings"
import frappe.translate
site = get_site(context)
try:
frappe.init(site=site)
frappe.connect()
frappe.translate.update_translations(lang, untranslated_file, translated_file)
finally:
frappe.destroy()
@click.command('import-translations')
@click.argument('lang')
@click.argument('path')
@pass_context
def import_translations(context, lang, path):
"Update translated strings"
import frappe.translate
site = get_site(context)
try:
frappe.init(site=site)
frappe.connect()
frappe.translate.import_translations(lang, path)
finally:
frappe.destroy()
commands = [
build_message_files,
get_untranslated,
import_translations,
new_language,
update_translations,
]
| mit |
Encrylize/flask-blogger | app/utils/helpers.py | 1 | 1218 | from urllib.parse import urljoin, urlparse
from flask import request
def get_or_create(model, **kwargs):
"""
Gets or creates an instance of model.
Args:
model: SQLAlchemy model
**kwargs: Model properties
Returns:
An instance of model and True if it was created, False if it was not.
"""
instance = model.query.filter_by(**kwargs).first()
if instance:
return instance, False
else:
instance = model(**kwargs)
return instance, True
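# Hypothetical usage sketch (the Tag model, its 'name' field and the db
# session are assumptions for illustration, not defined in this module):
#     tag, created = get_or_create(Tag, name='flask')
#     if created:
#         db.session.add(tag)
#         db.session.commit()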
def is_safe_url(target):
"""
Checks if a URL is safe.
Args:
target: The URL to check
Returns:
True if the URL is safe, False if it is not.
"""
ref_url = urlparse(request.host_url)
test_url = urlparse(urljoin(request.host_url, target))
return test_url.scheme in ('http',
'https') and ref_url.netloc == test_url.netloc
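# Illustrative sketch (host and URLs are assumptions): during a request to
# http://example.com/, is_safe_url('/posts/1') is True because the joined URL
# stays on the same netloc, while is_safe_url('http://evil.test/') is False.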
def get_redirect_target():
"""
Gets a safe redirect target.
Returns:
The first safe redirect target.
"""
for target in request.args.get('next'), request.referrer:
if not target:
continue
elif is_safe_url(target):
return target
| mit |
aperigault/ansible | lib/ansible/modules/windows/win_eventlog_entry.py | 38 | 2212 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Andrew Saraceni <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_eventlog_entry
version_added: "2.4"
short_description: Write entries to Windows event logs
description:
- Write log entries to a given event log from a specified source.
options:
log:
description:
- Name of the event log to write an entry to.
type: str
required: yes
source:
description:
- Name of the log source to indicate where the entry is from.
type: str
required: yes
event_id:
description:
- The numeric event identifier for the entry.
- Value must be between 0 and 65535.
type: int
required: yes
message:
description:
- The message for the given log entry.
type: str
required: yes
entry_type:
description:
- Indicates the entry being written to the log is of a specific type.
type: str
choices: [ Error, FailureAudit, Information, SuccessAudit, Warning ]
category:
description:
- A numeric task category associated with the category message file for the log source.
type: int
raw_data:
description:
- Binary data associated with the log entry.
- Value must be a comma-separated array of 8-bit unsigned integers (0 to 255).
type: str
notes:
- This module will always report a change when writing an event entry.
seealso:
- module: win_eventlog
author:
- Andrew Saraceni (@andrewsaraceni)
'''
EXAMPLES = r'''
- name: Write an entry to a Windows event log
win_eventlog_entry:
log: MyNewLog
source: NewLogSource1
event_id: 1234
message: This is a test log entry.
- name: Write another entry to a different Windows event log
win_eventlog_entry:
log: AnotherLog
source: MyAppSource
event_id: 5000
message: An error has occurred.
entry_type: Error
category: 5
raw_data: 10,20
'''
RETURN = r'''
# Default return values
'''
| gpl-3.0 |
ygol/dotfiles | link/hammerspoon/hs/node_modules/node-gyp/gyp/tools/graphviz.py | 2679 | 2878 | #!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Using the JSON dumped by the dump-dependency-json generator,
generate input suitable for graphviz to render a dependency graph of
targets."""
import collections
import json
import sys
def ParseTarget(target):
target, _, suffix = target.partition('#')
filename, _, target = target.partition(':')
return filename, target, suffix
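# Illustrative sketch (target string invented for this example): a dumped
# target such as 'tools/gyp/foo.gyp:bar#host' splits into its three parts:
#     ParseTarget('tools/gyp/foo.gyp:bar#host')
#     # -> ('tools/gyp/foo.gyp', 'bar', 'host')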
def LoadEdges(filename, targets):
"""Load the edges map from the dump file, and filter it to only
  show targets in |targets| and their dependents."""
file = open('dump.json')
edges = json.load(file)
file.close()
# Copy out only the edges we're interested in from the full edge list.
target_edges = {}
to_visit = targets[:]
while to_visit:
src = to_visit.pop()
if src in target_edges:
continue
target_edges[src] = edges[src]
to_visit.extend(edges[src])
return target_edges
def WriteGraph(edges):
"""Print a graphviz graph to stdout.
|edges| is a map of target to a list of other targets it depends on."""
# Bucket targets by file.
files = collections.defaultdict(list)
for src, dst in edges.items():
build_file, target_name, toolset = ParseTarget(src)
files[build_file].append(src)
print 'digraph D {'
print ' fontsize=8' # Used by subgraphs.
print ' node [fontsize=8]'
# Output nodes by file. We must first write out each node within
# its file grouping before writing out any edges that may refer
# to those nodes.
for filename, targets in files.items():
if len(targets) == 1:
# If there's only one node for this file, simplify
# the display by making it a box without an internal node.
target = targets[0]
build_file, target_name, toolset = ParseTarget(target)
print ' "%s" [shape=box, label="%s\\n%s"]' % (target, filename,
target_name)
else:
# Group multiple nodes together in a subgraph.
print ' subgraph "cluster_%s" {' % filename
print ' label = "%s"' % filename
for target in targets:
build_file, target_name, toolset = ParseTarget(target)
print ' "%s" [label="%s"]' % (target, target_name)
print ' }'
# Now that we've placed all the nodes within subgraphs, output all
# the edges between nodes.
for src, dsts in edges.items():
for dst in dsts:
print ' "%s" -> "%s"' % (src, dst)
print '}'
def main():
if len(sys.argv) < 2:
print >>sys.stderr, __doc__
print >>sys.stderr
print >>sys.stderr, 'usage: %s target1 target2...' % (sys.argv[0])
return 1
edges = LoadEdges('dump.json', sys.argv[1:])
WriteGraph(edges)
return 0
if __name__ == '__main__':
sys.exit(main())
| mit |
coupdair/pyoptools | pyoptools/misc/GS/gs.py | 9 | 10699 | from mako.template import Template
from pyoptools.misc.resources import has_double_support, has_amd_double_support
### NOTE: this import still needs a proper fallback in case pyopencl is not installed
try:
from pyfft.cl import Plan
import pyopencl as cl
import pyopencl.array as cl_array
except:
pass
from numpy.fft import fft2,ifft2,fftshift,ifftshift
from numpy import angle,exp,pi, complex128, zeros, sqrt,int32, zeros_like,ones
from numpy.random import random
from pylab import imshow,colorbar
KERNEL= \
"""
//There are some operations that are not defined in the RV770 GPUs
// for doubles, so a cast to float is needed
% if double_support:
#pragma OPENCL EXTENSION cl_khr_fp64: enable
#define CAST (double)
% elif amd_double_support:
#pragma OPENCL EXTENSION cl_amd_fp64: enable
#define CAST (float)
% endif
__kernel void norm(__global double2 *data)
{
int nWidth = get_global_size(0);
int nHeight = get_global_size(1);
    int ox=get_global_id(0); // Get the index in X
    int oy=get_global_id(1); // Get the index in Y
int i= oy*nWidth+ox;
double norm=sqrt(CAST(data[i].x*data[i].x+data[i].y*data[i].y));
if (norm>0)
{
data[i].x=data[i].x/norm;
data[i].y=data[i].y/norm;
    }
else
{
data[i].x=1;
data[i].y=0;
}
}
__kernel void norm1(__global double2 *data, __global double2 *idata, __global double *error, int cut)
{
int nWidth = get_global_size(0);
int nHeight = get_global_size(1);
int ox=get_global_id(0); // Toma los indices en X
int oy=get_global_id(1); // Toma los indices en Y
int i;
double norm,intdata;
i= oy*nWidth+ox;
error[i]=0;
    /// NOTE: the matrices arrive here with fftshift applied
if( ((ox<cut) && (oy<cut) ) ||
((ox>(nWidth-cut)) && (oy<cut) ) ||
((ox<cut) && (oy>(nHeight-cut))) ||
((ox>(nWidth-cut)) && (oy>(nHeight-cut))) )
{
intdata=data[i].x*data[i].x+data[i].y*data[i].y;
intdata=sqrt((float)intdata);
error[i]=(intdata-idata[i].x)*(intdata-idata[i].x);
norm=sqrt(CAST(data[i].x*data[i].x+data[i].y*data[i].y));
if (norm>0)
{
data[i].x=(data[i].x/norm)*idata[i].x;
data[i].y=(data[i].y/norm)*idata[i].x;
}
else
{
data[i].x=idata[i].x;
data[i].y=0;
}
}
}
__kernel void norm2(__global double2 *data, __global double2 *idata)
{
int nWidth = get_global_size(0);
int nHeight = get_global_size(1);
    int ox=get_global_id(0); // Get the index in X
    int oy=get_global_id(1); // Get the index in Y
int i;
double norm;
i= oy*nWidth+ox;
norm=sqrt(CAST(data[i].x*data[i].x+data[i].y*data[i].y));
if (norm>0)
{
data[i].x=(data[i].x/norm)*idata[i].x;
data[i].y=(data[i].y/norm)*idata[i].x;
}
else
{
data[i].x=idata[i].x;
data[i].y=0;
}
}
"""
#TODO: The GS algorithm should also use a maximum error condition to stop
# Not only the iteration condition
def gs(idata,itera=10, ia=None):
"""Gerchberg-Saxton algorithm to calculate DOEs
    Calculates the phase distribution in an object plane to obtain a
    specific amplitude distribution in the target plane. It uses an
    FFT to calculate the field propagation.
    The wavefront at the DOE plane is assumed to be a plane wave.
**ARGUMENTS:**
========== ======================================================
idata numpy array containing the target amplitude distribution
itera Maximum number of iterations
    ia         Illumination amplitude at the hologram plane. If not given,
               it is assumed to be a constant amplitude with a value
               of 1. If given, it should be an array with the same shape
               as idata.
========== ======================================================
"""
    if ia is None:
inpa=ones(idata.shape)
else:
inpa=ia
    assert idata.shape==inpa.shape, "ia and idata must have the same dimensions"
fdata=fftshift(fft2(ifftshift(idata)))
e=1000
ea=1000
for i in range (itera):
fdata=exp(1.j*angle(fdata))*inpa
rdata=ifftshift(ifft2(fftshift(fdata)))
e= (abs(rdata)-idata).std()
if e>ea:
break
ea=e
rdata=exp(1.j*angle(rdata))*(idata)
fdata=fftshift(fft2(ifftshift(rdata)))
fdata=exp(1.j*angle(fdata))
return fdata*inpa
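# Illustrative usage sketch (target pattern, size and iteration count are
# assumptions, not taken from this module):
#     target = zeros((256, 256))
#     target[96:160, 96:160] = 1.     # square amplitude target
#     doe = gs(target, itera=50)      # angle(doe) gives the DOE phase map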
def gs_mod(idata,itera=10,osize=256):
"""Modiffied Gerchberg-Saxton algorithm to calculate DOEs
Calculates the phase distribution in a object plane to obtain an
specific amplitude distribution in the target plane. It uses a
FFT to calculate the field propagation.
The wavefront at the DOE plane is assumed as a plane wave.
This algoritm leaves a window around the image plane to allow the
noise to move there. It only optimises the center of the image.
**ARGUMENTS:**
========== ======================================================
idata numpy array containing the target amplitude distribution
itera Maximum number of iterations
osize Size of the center of the image to be optimized
It should be smaller than the image itself.
========== ======================================================
"""
M,N=idata.shape
cut=osize//2
zone=zeros_like(idata)
    zone[M//2-cut:M//2+cut,N//2-cut:N//2+cut]=1
zone=zone.astype(bool)
mask=exp(2.j*pi*random(idata.shape))
mask[zone]=0
#~ imshow(abs(mask)),colorbar()
    fdata=fftshift(fft2(ifftshift(idata+mask))) # Note: applying this mask is very important; without it convergence is much slower
e=1000
ea=1000
for i in range (itera):
fdata=exp(1.j*angle(fdata))
rdata=ifftshift(ifft2(fftshift(fdata)))
#~ e= (abs(rdata[zone])-idata[zone]).std()
#~ if e>ea:
#~
#~ break
ea=e
rdata[zone]=exp(1.j*angle(rdata[zone]))*(idata[zone])
fdata=fftshift(fft2(ifftshift(rdata)))
fdata=exp(1.j*angle(fdata))
return fdata
def gs_gpu(idata,itera=100):
"""Gerchberg-Saxton algorithm to calculate DOEs using the GPU
    Calculates the phase distribution in an object plane to obtain a
    specific amplitude distribution in the target plane. It uses an
    FFT to calculate the field propagation.
    The wavefront at the DOE plane is assumed to be a plane wave.
**ARGUMENTS:**
========== ======================================================
idata numpy array containing the target amplitude distribution
itera Maximum number of iterations
========== ======================================================
"""
pl=cl.get_platforms()[0]
devices=pl.get_devices(device_type=cl.device_type.GPU)
ctx = cl.Context(devices=[devices[0]])
queue = cl.CommandQueue(ctx)
    plan = Plan(idata.shape, queue=queue,dtype=complex128) # does not work with "complex128"
src = str(Template(KERNEL).render(
double_support=all(
has_double_support(dev) for dev in devices),
amd_double_support=all(
has_amd_double_support(dev) for dev in devices)
))
prg = cl.Program(ctx,src).build()
idata_gpu=cl_array.to_device(queue, ifftshift(idata).astype("complex128"))
fdata_gpu=cl_array.empty_like(idata_gpu)
rdata_gpu=cl_array.empty_like(idata_gpu)
plan.execute(idata_gpu.data,fdata_gpu.data)
e=1000
ea=1000
for i in range (itera):
prg.norm(queue, fdata_gpu.shape, None,fdata_gpu.data)
plan.execute(fdata_gpu.data,rdata_gpu.data,inverse=True)
tr=rdata_gpu.get()
rdata=ifftshift(tr)
#TODO: This calculation should be done in the GPU
e= (abs(rdata)-idata).std()
if e>ea:
break
ea=e
prg.norm2(queue, rdata_gpu.shape,None,rdata_gpu.data,idata_gpu.data)
plan.execute(rdata_gpu.data,fdata_gpu.data)
fdata=fdata_gpu.get()
#~ prg.norm(queue, fdata_gpu.shape, None,fdata_gpu.data)
fdata=ifftshift(fdata)
fdata=exp(1.j*angle(fdata))
#~ fdata=fdata_gpu.get()
return fdata
def gs_mod_gpu(idata,itera=10,osize=256):
cut=osize//2
pl=cl.get_platforms()[0]
devices=pl.get_devices(device_type=cl.device_type.GPU)
ctx = cl.Context(devices=[devices[0]])
queue = cl.CommandQueue(ctx)
    plan = Plan(idata.shape, queue=queue,dtype=complex128) # does not work with "complex128"
src = str(Template(KERNEL).render(
double_support=all(
has_double_support(dev) for dev in devices),
amd_double_support=all(
has_amd_double_support(dev) for dev in devices)
))
prg = cl.Program(ctx,src).build()
idata_gpu=cl_array.to_device(queue, ifftshift(idata).astype("complex128"))
fdata_gpu=cl_array.empty_like(idata_gpu)
rdata_gpu=cl_array.empty_like(idata_gpu)
plan.execute(idata_gpu.data,fdata_gpu.data)
mask=exp(2.j*pi*random(idata.shape))
mask[512-cut:512+cut,512-cut:512+cut]=0
idata_gpu=cl_array.to_device(queue, ifftshift(idata+mask).astype("complex128"))
fdata_gpu=cl_array.empty_like(idata_gpu)
rdata_gpu=cl_array.empty_like(idata_gpu)
error_gpu=cl_array.to_device(ctx, queue, zeros(idata_gpu.shape).astype("double"))
plan.execute(idata_gpu.data,fdata_gpu.data)
e=1000
ea=1000
for i in range (itera):
prg.norm(queue, fdata_gpu.shape, None,fdata_gpu.data)
plan.execute(fdata_gpu.data,rdata_gpu.data,inverse=True)
#~ prg.norm1(queue, rdata_gpu.shape,None,rdata_gpu.data,idata_gpu.data,error_gpu.data, int32(cut))
norm1=prg.norm1
norm1.set_scalar_arg_dtypes([None, None, None, int32])
norm1(queue, rdata_gpu.shape,None,rdata_gpu.data,idata_gpu.data,error_gpu.data, int32(cut))
e= sqrt(cl_array.sum(error_gpu).get())/(2*cut)
#~ if e>ea:
#~
#~ break
#~ ea=e
plan.execute(rdata_gpu.data,fdata_gpu.data)
fdata=fdata_gpu.get()
fdata=ifftshift(fdata)
fdata=exp(1.j*angle(fdata))
return fdata
| bsd-3-clause |