repo_name (string, 5-100 chars) | path (string, 4-375 chars) | copies (string, 991 distinct values) | size (string, 4-7 chars) | content (string, 666-1M chars) | license (string, 15 distinct values) |
---|---|---|---|---|---|
dqnykamp/sympy | sympy/core/tests/test_exprtools.py | 7 | 13472 | """Tests for tools for manipulating large commutative expressions. """
from sympy import (S, Add, sin, Mul, Symbol, oo, Integral, sqrt, Tuple, I,
Interval, O, symbols, simplify, collect, Sum, Basic, Dict,
root, exp, cos)
from sympy.abc import a, b, t, x, y, z
from sympy.core.exprtools import (decompose_power, Factors, Term, _gcd_terms,
gcd_terms, factor_terms, factor_nc)
from sympy.core.mul import _keep_coeff as _keep_coeff
from sympy.simplify.cse_opts import sub_pre
from sympy.utilities.pytest import raises
def test_decompose_power():
assert decompose_power(x) == (x, 1)
assert decompose_power(x**2) == (x, 2)
assert decompose_power(x**(2*y)) == (x**y, 2)
assert decompose_power(x**(2*y/3)) == (x**(y/3), 2)
def test_Factors():
assert Factors() == Factors({}) == Factors(S(1))
assert Factors().as_expr() == S.One
assert Factors({x: 2, y: 3, sin(x): 4}).as_expr() == x**2*y**3*sin(x)**4
assert Factors(S.Infinity) == Factors({oo: 1})
assert Factors(S.NegativeInfinity) == Factors({oo: 1, -1: 1})
a = Factors({x: 5, y: 3, z: 7})
b = Factors({ y: 4, z: 3, t: 10})
assert a.mul(b) == a*b == Factors({x: 5, y: 7, z: 10, t: 10})
assert a.div(b) == divmod(a, b) == \
(Factors({x: 5, z: 4}), Factors({y: 1, t: 10}))
assert a.quo(b) == a/b == Factors({x: 5, z: 4})
assert a.rem(b) == a % b == Factors({y: 1, t: 10})
assert a.pow(3) == a**3 == Factors({x: 15, y: 9, z: 21})
assert b.pow(3) == b**3 == Factors({y: 12, z: 9, t: 30})
assert a.gcd(b) == Factors({y: 3, z: 3})
assert a.lcm(b) == Factors({x: 5, y: 4, z: 7, t: 10})
a = Factors({x: 4, y: 7, t: 7})
b = Factors({z: 1, t: 3})
assert a.normal(b) == (Factors({x: 4, y: 7, t: 4}), Factors({z: 1}))
assert Factors(sqrt(2)*x).as_expr() == sqrt(2)*x
assert Factors(-I)*I == Factors()
assert Factors({S(-1): S(3)})*Factors({S(-1): S(1), I: S(5)}) == \
Factors(I)
assert Factors(S(2)**x).div(S(3)**x) == \
(Factors({S(2): x}), Factors({S(3): x}))
assert Factors(2**(2*x + 2)).div(S(8)) == \
(Factors({S(2): 2*x + 2}), Factors({S(8): S(1)}))
# coverage
# /!\ things break if this is not True
assert Factors({S(-1): S(3)/2}) == Factors({I: S.One, S(-1): S.One})
assert Factors({I: S(1), S(-1): S(1)/3}).as_expr() == I*(-1)**(S(1)/3)
assert Factors(-1.) == Factors({S(-1): S(1), S(1.): 1})
assert Factors(-2.) == Factors({S(-1): S(1), S(2.): 1})
assert Factors((-2.)**x) == Factors({S(-2.): x})
assert Factors(S(-2)) == Factors({S(-1): S(1), S(2): 1})
assert Factors(S.Half) == Factors({S(2): -S.One})
assert Factors(S(3)/2) == Factors({S(3): S.One, S(2): S(-1)})
assert Factors({I: S(1)}) == Factors(I)
assert Factors({-1.0: 2, I: 1}) == Factors({S(1.0): 1, I: 1})
assert Factors({S.NegativeOne: -S(3)/2}).as_expr() == I
A = symbols('A', commutative=False)
assert Factors(2*A**2) == Factors({S(2): 1, A**2: 1})
assert Factors(I) == Factors({I: S.One})
assert Factors(x).normal(S(2)) == (Factors(x), Factors(S(2)))
assert Factors(x).normal(S(0)) == (Factors(), Factors(S(0)))
raises(ZeroDivisionError, lambda: Factors(x).div(S(0)))
assert Factors(x).mul(S(2)) == Factors(2*x)
assert Factors(x).mul(S(0)).is_zero
assert Factors(x).mul(1/x).is_one
assert Factors(x**sqrt(2)**3).as_expr() == x**(2*sqrt(2))
assert Factors(x)**Factors(S(2)) == Factors(x**2)
assert Factors(x).gcd(S(0)) == Factors(x)
assert Factors(x).lcm(S(0)).is_zero
assert Factors(S(0)).div(x) == (Factors(S(0)), Factors())
assert Factors(x).div(x) == (Factors(), Factors())
assert Factors({x: .2})/Factors({x: .2}) == Factors()
assert Factors(x) != Factors()
assert Factors(S(0)).normal(x) == (Factors(S(0)), Factors())
n, d = x**(2 + y), x**2
f = Factors(n)
assert f.div(d) == f.normal(d) == (Factors(x**y), Factors())
assert f.gcd(d) == Factors()
d = x**y
assert f.div(d) == f.normal(d) == (Factors(x**2), Factors())
assert f.gcd(d) == Factors(d)
n = d = 2**x
f = Factors(n)
assert f.div(d) == f.normal(d) == (Factors(), Factors())
assert f.gcd(d) == Factors(d)
n, d = 2**x, 2**y
f = Factors(n)
assert f.div(d) == f.normal(d) == (Factors({S(2): x}), Factors({S(2): y}))
assert f.gcd(d) == Factors()
# extraction of constant only
n = x**(x + 3)
assert Factors(n).normal(x**-3) == (Factors({x: x + 6}), Factors({}))
assert Factors(n).normal(x**3) == (Factors({x: x}), Factors({}))
assert Factors(n).normal(x**4) == (Factors({x: x}), Factors({x: 1}))
assert Factors(n).normal(x**(y - 3)) == \
(Factors({x: x + 6}), Factors({x: y}))
assert Factors(n).normal(x**(y + 3)) == (Factors({x: x}), Factors({x: y}))
assert Factors(n).normal(x**(y + 4)) == \
(Factors({x: x}), Factors({x: y + 1}))
assert Factors(n).div(x**-3) == (Factors({x: x + 6}), Factors({}))
assert Factors(n).div(x**3) == (Factors({x: x}), Factors({}))
assert Factors(n).div(x**4) == (Factors({x: x}), Factors({x: 1}))
assert Factors(n).div(x**(y - 3)) == \
(Factors({x: x + 6}), Factors({x: y}))
assert Factors(n).div(x**(y + 3)) == (Factors({x: x}), Factors({x: y}))
assert Factors(n).div(x**(y + 4)) == \
(Factors({x: x}), Factors({x: y + 1}))
def test_Term():
a = Term(4*x*y**2/z/t**3)
b = Term(2*x**3*y**5/t**3)
assert a == Term(4, Factors({x: 1, y: 2}), Factors({z: 1, t: 3}))
assert b == Term(2, Factors({x: 3, y: 5}), Factors({t: 3}))
assert a.as_expr() == 4*x*y**2/z/t**3
assert b.as_expr() == 2*x**3*y**5/t**3
assert a.inv() == \
Term(S(1)/4, Factors({z: 1, t: 3}), Factors({x: 1, y: 2}))
assert b.inv() == Term(S(1)/2, Factors({t: 3}), Factors({x: 3, y: 5}))
assert a.mul(b) == a*b == \
Term(8, Factors({x: 4, y: 7}), Factors({z: 1, t: 6}))
assert a.quo(b) == a/b == Term(2, Factors({}), Factors({x: 2, y: 3, z: 1}))
assert a.pow(3) == a**3 == \
Term(64, Factors({x: 3, y: 6}), Factors({z: 3, t: 9}))
assert b.pow(3) == b**3 == Term(8, Factors({x: 9, y: 15}), Factors({t: 9}))
assert a.pow(-3) == a**(-3) == \
Term(S(1)/64, Factors({z: 3, t: 9}), Factors({x: 3, y: 6}))
assert b.pow(-3) == b**(-3) == \
Term(S(1)/8, Factors({t: 9}), Factors({x: 9, y: 15}))
assert a.gcd(b) == Term(2, Factors({x: 1, y: 2}), Factors({t: 3}))
assert a.lcm(b) == Term(4, Factors({x: 3, y: 5}), Factors({z: 1, t: 3}))
a = Term(4*x*y**2/z/t**3)
b = Term(2*x**3*y**5*t**7)
assert a.mul(b) == Term(8, Factors({x: 4, y: 7, t: 4}), Factors({z: 1}))
assert Term((2*x + 2)**3) == Term(8, Factors({x + 1: 3}), Factors({}))
assert Term((2*x + 2)*(3*x + 6)**2) == \
Term(18, Factors({x + 1: 1, x + 2: 2}), Factors({}))
def test_gcd_terms():
f = 2*(x + 1)*(x + 4)/(5*x**2 + 5) + (2*x + 2)*(x + 5)/(x**2 + 1)/5 + \
(2*x + 2)*(x + 6)/(5*x**2 + 5)
assert _gcd_terms(f) == ((S(6)/5)*((1 + x)/(1 + x**2)), 5 + x, 1)
assert _gcd_terms(Add.make_args(f)) == \
((S(6)/5)*((1 + x)/(1 + x**2)), 5 + x, 1)
newf = (S(6)/5)*((1 + x)*(5 + x)/(1 + x**2))
assert gcd_terms(f) == newf
args = Add.make_args(f)
# non-Basic sequences of terms treated as terms of Add
assert gcd_terms(list(args)) == newf
assert gcd_terms(tuple(args)) == newf
assert gcd_terms(set(args)) == newf
# but a Basic sequence is treated as a container
assert gcd_terms(Tuple(*args)) != newf
assert gcd_terms(Basic(Tuple(1, 3*y + 3*x*y), Tuple(1, 3))) == \
Basic((1, 3*y*(x + 1)), (1, 3))
# but we shouldn't change keys of a dictionary or some may be lost
assert gcd_terms(Dict((x*(1 + y), 2), (x + x*y, y + x*y))) == \
Dict({x*(y + 1): 2, x + x*y: y*(1 + x)})
assert gcd_terms((2*x + 2)**3 + (2*x + 2)**2) == 4*(x + 1)**2*(2*x + 3)
assert gcd_terms(0) == 0
assert gcd_terms(1) == 1
assert gcd_terms(x) == x
assert gcd_terms(2 + 2*x) == Mul(2, 1 + x, evaluate=False)
arg = x*(2*x + 4*y)
garg = 2*x*(x + 2*y)
assert gcd_terms(arg) == garg
assert gcd_terms(sin(arg)) == sin(garg)
# issue 6139-like
alpha, alpha1, alpha2, alpha3 = symbols('alpha:4')
a = alpha**2 - alpha*x**2 + alpha + x**3 - x*(alpha + 1)
rep = (alpha, (1 + sqrt(5))/2 + alpha1*x + alpha2*x**2 + alpha3*x**3)
s = (a/(x - alpha)).subs(*rep).series(x, 0, 1)
assert simplify(collect(s, x)) == -sqrt(5)/2 - S(3)/2 + O(x)
# issue 5917
assert _gcd_terms([S.Zero, S.Zero]) == (0, 0, 1)
assert _gcd_terms([2*x + 4]) == (2, x + 2, 1)
eq = x/(x + 1/x)
assert gcd_terms(eq, fraction=False) == eq
def test_factor_terms():
A = Symbol('A', commutative=False)
assert factor_terms(9*(x + x*y + 1) + (3*x + 3)**(2 + 2*x)) == \
9*x*y + 9*x + _keep_coeff(S(3), x + 1)**_keep_coeff(S(2), x + 1) + 9
assert factor_terms(9*(x + x*y + 1) + (3)**(2 + 2*x)) == \
_keep_coeff(S(9), 3**(2*x) + x*y + x + 1)
assert factor_terms(3**(2 + 2*x) + a*3**(2 + 2*x)) == \
9*3**(2*x)*(a + 1)
assert factor_terms(x + x*A) == \
x*(1 + A)
assert factor_terms(sin(x + x*A)) == \
sin(x*(1 + A))
assert factor_terms((3*x + 3)**((2 + 2*x)/3)) == \
_keep_coeff(S(3), x + 1)**_keep_coeff(S(2)/3, x + 1)
assert factor_terms(x + (x*y + x)**(3*x + 3)) == \
x + (x*(y + 1))**_keep_coeff(S(3), x + 1)
assert factor_terms(a*(x + x*y) + b*(x*2 + y*x*2)) == \
x*(a + 2*b)*(y + 1)
i = Integral(x, (x, 0, oo))
assert factor_terms(i) == i
# check radical extraction
eq = sqrt(2) + sqrt(10)
assert factor_terms(eq) == eq
assert factor_terms(eq, radical=True) == sqrt(2)*(1 + sqrt(5))
eq = root(-6, 3) + root(6, 3)
assert factor_terms(eq, radical=True) == 6**(S(1)/3)*(1 + (-1)**(S(1)/3))
eq = [x + x*y]
ans = [x*(y + 1)]
for c in [list, tuple, set]:
assert factor_terms(c(eq)) == c(ans)
assert factor_terms(Tuple(x + x*y)) == Tuple(x*(y + 1))
assert factor_terms(Interval(0, 1)) == Interval(0, 1)
e = 1/sqrt(a/2 + 1)
assert factor_terms(e, clear=False) == 1/sqrt(a/2 + 1)
assert factor_terms(e, clear=True) == sqrt(2)/sqrt(a + 2)
eq = x/(x + 1/x) + 1/(x**2 + 1)
assert factor_terms(eq, fraction=False) == eq
assert factor_terms(eq, fraction=True) == 1
assert factor_terms((1/(x**3 + x**2) + 2/x**2)*y) == \
y*(2 + 1/(x + 1))/x**2
# if not True, then processing for this in factor_terms is not necessary
assert gcd_terms(-x - y) == -x - y
assert factor_terms(-x - y) == Mul(-1, x + y, evaluate=False)
# if not True, then "special" processesing in factor_terms is not necessary
assert gcd_terms(exp(Mul(-1, x + 1))) == exp(-x - 1)
e = exp(-x - 2) + x
assert factor_terms(e) == exp(Mul(-1, x + 2, evaluate=False)) + x
assert factor_terms(e, sign=False) == e
assert factor_terms(exp(-4*x - 2) - x) == -x + exp(Mul(-2, 2*x + 1, evaluate=False))
def test_xreplace():
e = Mul(2, 1 + x, evaluate=False)
assert e.xreplace({}) == e
assert e.xreplace({y: x}) == e
def test_factor_nc():
x, y = symbols('x,y')
k = symbols('k', integer=True)
n, m, o = symbols('n,m,o', commutative=False)
# mul and multinomial expansion is needed
from sympy.simplify.simplify import _mexpand
e = x*(1 + y)**2
assert _mexpand(e) == x + x*2*y + x*y**2
def factor_nc_test(e):
ex = _mexpand(e)
assert ex.is_Add
f = factor_nc(ex)
assert not f.is_Add and _mexpand(f) == ex
factor_nc_test(x*(1 + y))
factor_nc_test(n*(x + 1))
factor_nc_test(n*(x + m))
factor_nc_test((x + m)*n)
factor_nc_test(n*m*(x*o + n*o*m)*n)
s = Sum(x, (x, 1, 2))
factor_nc_test(x*(1 + s))
factor_nc_test(x*(1 + s)*s)
factor_nc_test(x*(1 + sin(s)))
factor_nc_test((1 + n)**2)
factor_nc_test((x + n)*(x + m)*(x + y))
factor_nc_test(x*(n*m + 1))
factor_nc_test(x*(n*m + x))
factor_nc_test(x*(x*n*m + 1))
factor_nc_test(x*n*(x*m + 1))
factor_nc_test(x*(m*n + x*n*m))
factor_nc_test(n*(1 - m)*n**2)
factor_nc_test((n + m)**2)
factor_nc_test((n - m)*(n + m)**2)
factor_nc_test((n + m)**2*(n - m))
factor_nc_test((m - n)*(n + m)**2*(n - m))
assert factor_nc(n*(n + n*m)) == n**2*(1 + m)
assert factor_nc(m*(m*n + n*m*n**2)) == m*(m + n*m*n)*n
eq = m*sin(n) - sin(n)*m
assert factor_nc(eq) == eq
# for coverage:
from sympy.physics.secondquant import Commutator
from sympy import factor
eq = 1 + x*Commutator(m, n)
assert factor_nc(eq) == eq
eq = x*Commutator(m, n) + x*Commutator(m, o)*Commutator(m, n)
assert factor(eq) == x*(1 + Commutator(m, o))*Commutator(m, n)
# issue 6534
assert (2*n + 2*m).factor() == 2*(n + m)
# issue 6701
assert factor_nc(n**k + n**(k + 1)) == n**k*(1 + n)
assert factor_nc((m*n)**k + (m*n)**(k + 1)) == (1 + m*n)*(m*n)**k
# issue 6918
assert factor_nc(-n*(2*x**2 + 2*x)) == -2*n*x*(x + 1)
def test_issue_6360():
a, b = symbols("a b")
apb = a + b
eq = apb + apb**2*(-2*a - 2*b)
assert factor_terms(sub_pre(eq)) == a + b - 2*(a + b)**3
def test_issue_7903():
a = symbols(r'a', real=True)
t = exp(I*cos(a)) + exp(-I*sin(a))
assert t.simplify()
| bsd-3-clause |
panoptes/POCS | src/panoptes/pocs/mount/serial.py | 1 | 7188 | import re
from abc import ABC
from panoptes.utils import error
from panoptes.utils import rs232
from panoptes.pocs.mount import AbstractMount
class AbstractSerialMount(AbstractMount, ABC):
def __init__(self, *args, **kwargs):
"""Initialize an AbstractSerialMount for the port defined in the config.
Opens a connection to the serial device, if it is valid.
"""
super().__init__(*args, **kwargs)
# This should be overridden by derived classes.
self._status_format = re.compile('.*')
# Setup our serial connection at the given port
try:
serial_config = self.get_config('mount.serial')
self.serial = rs232.SerialData(**serial_config)
if self.serial.is_connected is False:
raise error.MountNotFound("Can't open mount")
except KeyError:
self.logger.critical("No serial config specified, cannot create mount: "
f"{self.get_config('mount')}")
except Exception as e:
self.logger.critical(e)
@property
def _port(self):
return self.serial.ser.port
def connect(self):
"""Connects to the mount via the serial port (`self._port`)
Returns:
Returns the self.is_connected property (bool) which checks
the actual serial connection.
"""
self.logger.debug('Connecting to mount')
if self.serial and not self.serial.is_connected:
try:
self._connect()
except OSError as err:
self.logger.error(f"{err!r}")
except error.BadSerialConnection as err:
self.logger.warning('Could not create serial connection to mount.')
self.logger.warning(f'NO MOUNT CONTROL AVAILABLE {err!r}')
self._is_connected = True
self.logger.info(f'Mount connected: {self.is_connected}')
return self.is_connected
def disconnect(self):
self.logger.debug("Closing serial port for mount")
if self.serial:
self.serial.disconnect()
self._is_connected = self.serial.is_connected
def set_tracking_rate(self, direction='ra', delta=0.0):
"""Set the tracking rate for the mount
Args:
direction (str, optional): Either `ra` or `dec`
delta (float, optional): Offset multiple of sidereal rate, defaults to 0.0
"""
delta = round(float(delta), 4)
# Restrict range
if delta > 0.01:
delta = 0.01
elif delta < -0.01:
delta = -0.01
# Dumb hack work-around for beginning 0
delta_str_f, delta_str_b = f'{delta:+0.04f}'.split('.')
delta_str_f += '0' # Add extra zero
delta_str = f'{delta_str_f}.{delta_str_b}'
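# For example (illustrative): delta=0.005 formats as '+0.0050' and, after the
# extra zero is inserted, is sent as '+00.0050' -- a leading-zero format the
# mount firmware appears to expect.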
self.logger.debug(f'Setting tracking rate to sidereal {delta_str}')
if self.query('set_custom_tracking'):
self.logger.debug("Custom tracking rate set")
response = self.query(f'set_custom_{direction}_tracking_rate', f'{delta_str}')
self.logger.debug(f'Tracking response: {response}')
if response:
self.tracking = 'Custom'
self.tracking_rate = 1.0 + delta
self.logger.debug('Custom tracking rate sent')
def write(self, cmd):
""" Sends a string command to the mount via the serial port.
First 'translates' the message into the form the specific mount can understand using the
mount configuration yaml file. This method is most often used from within `query` and
may become a private method in the future.
Note:
This command currently does not support the passing of parameters. See `query` instead.
Args:
cmd (str): A command to send to the mount. This should be one of the commands listed
in the mount commands yaml file.
"""
assert self.is_initialized, self.logger.warning('Mount has not been initialized')
# self.serial.reset_input_buffer()
# self.logger.debug("Mount Query: {}".format(cmd))
self.serial.write(cmd)
def read(self, *args):
""" Reads from the serial connection
Returns:
str: Response from mount
"""
assert self.is_initialized, self.logger.warning('Mount has not been initialized')
response = ''
response = self.serial.read()
self.logger.trace(f'Mount Read: {response}')
# Strip the line ending (#) and return
response = response.rstrip('#')
# If it is an integer, turn it into one
if response == '0' or response == '1':
try:
response = int(response)
except ValueError:
pass
return response
def _connect(self):
""" Sets up serial connection """
self.logger.debug(f'Making serial connection for mount at {self._port}')
try:
self.serial.connect()
except Exception:
raise error.BadSerialConnection(
f'Cannot create serial connect for mount at port {self._port}')
self.logger.debug('Mount connected via serial')
def _get_command(self, cmd, params=None):
""" Looks up appropriate command for telescope """
full_command = ''
# Get the actual command
cmd_info = self.commands.get(cmd)
if cmd_info is not None:
# Check if this command needs params
if 'params' in cmd_info:
if params is None:
raise error.InvalidMountCommand(
f'{cmd} expects params: {cmd_info.get("params")}')
full_command = f"{self._pre_cmd}{cmd_info.get('cmd')}{params}{self._post_cmd}"
else:
full_command = f"{self._pre_cmd}{cmd_info.get('cmd')}{self._post_cmd}"
self.logger.trace(f'Mount Full Command: {full_command}')
else:
self.logger.warning(f'No command for {cmd}')
return full_command
def _update_status(self):
self._raw_status = self.query('get_status')
status = dict()
status_match = self._status_format.fullmatch(self._raw_status)
if status_match:
status = status_match.groupdict()
# Lookup the text values and replace in status dict
for k, v in status.items():
status[k] = self._status_lookup[k][v]
self._state = status['state']
self._movement_speed = status['movement_speed']
self._at_mount_park = 'Park' in self.state
self._is_home = 'Stopped - Zero Position' in self.state
self._is_tracking = 'Tracking' in self.state
self._is_slewing = 'Slewing' in self.state
guide_rate = self.query('get_guide_rate')
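# The reply format is assumed here: e.g. a response of '9050' would give
# ra_guide_rate 0.90 and dec_guide_rate 0.50.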
self.ra_guide_rate = int(guide_rate[0:2]) / 100
self.dec_guide_rate = int(guide_rate[2:]) / 100
status['timestamp'] = self.query('get_local_time')
status['tracking_rate_ra'] = self.tracking_rate
return status
| mit |
crossroadchurch/paul | openlp/plugins/bibles/lib/csvbible.py | 1 | 6989 | # -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2015 OpenLP Developers #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
"""
The :mod:`csvbible` module provides a facility to import bibles from a set of CSV files.
The module expects two mandatory files containing the books and the verses.
The format of the books file is:
<book_id>,<testament_id>,<book_name>,<book_abbreviation>
For example
1,1,Genesis,Gen
2,1,Exodus,Exod
...
40,2,Matthew,Matt
There are two acceptable formats of the verses file. They are:
<book_id>,<chapter_number>,<verse_number>,<verse_text>
or
<book_name>,<chapter_number>,<verse_number>,<verse_text>
For example:
1,1,1,"In the beginning God created the heaven and the earth."
or
"Genesis",1,2,"And the earth was without form, and void; and...."
All CSV files are expected to use a comma (',') as the delimiter and double quotes ('"') as the quote symbol.
"""
import logging
import chardet
import csv
from openlp.core.common import translate
from openlp.plugins.bibles.lib.db import BibleDB, BiblesResourcesDB
log = logging.getLogger(__name__)
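# Illustrative sketch (not part of the original importer): reading the books
# file format described in the module docstring with the stdlib csv module.
# The file name, encoding and column layout here are assumptions.
#
#     import csv
#     with open('books.csv', 'r', encoding='utf-8') as books_file:
#         reader = csv.reader(books_file, delimiter=',', quotechar='"')
#         # Each row: <book_id>,<testament_id>,<book_name>,<book_abbreviation>
#         book_names = {int(row[0]): row[2] for row in reader}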
class CSVBible(BibleDB):
"""
This class provides a specialisation for importing CSV Bibles.
"""
log.info('CSVBible loaded')
def __init__(self, parent, **kwargs):
"""
Loads a Bible from a set of CSV files. This class assumes the files contain all the information and a clean
bible is being loaded.
"""
log.info(self.__class__.__name__)
BibleDB.__init__(self, parent, **kwargs)
self.books_file = kwargs['booksfile']
self.verses_file = kwargs['versefile']
def do_import(self, bible_name=None):
"""
Import the bible books and verses.
"""
self.wizard.progress_bar.setValue(0)
self.wizard.progress_bar.setMinimum(0)
self.wizard.progress_bar.setMaximum(66)
success = True
language_id = self.get_language(bible_name)
if not language_id:
log.error('Importing books from "%s" failed' % self.filename)
return False
books_file = None
book_list = {}
# Populate the Tables
try:
details = get_file_encoding(self.books_file)
books_file = open(self.books_file, 'r', encoding=details['encoding'])
books_reader = csv.reader(books_file, delimiter=',', quotechar='"')
for line in books_reader:
if self.stop_import_flag:
break
self.wizard.increment_progress_bar(translate('BiblesPlugin.CSVBible', 'Importing books... %s')
% line[2])
book_ref_id = self.get_book_ref_id_by_name(line[2], 67, language_id)
if not book_ref_id:
log.error('Importing books from "%s" failed' % self.books_file)
return False
book_details = BiblesResourcesDB.get_book_by_id(book_ref_id)
self.create_book(line[2], book_ref_id, book_details['testament_id'])
book_list.update({int(line[0]): line[2]})
self.application.process_events()
except (IOError, IndexError):
log.exception('Loading books from file failed')
success = False
finally:
if books_file:
books_file.close()
if self.stop_import_flag or not success:
return False
self.wizard.progress_bar.setValue(0)
self.wizard.progress_bar.setMaximum(67)
verse_file = None
try:
book_ptr = None
details = get_file_encoding(self.verses_file)
verse_file = open(self.verses_file, 'r', encoding=details['encoding'])
verse_reader = csv.reader(verse_file, delimiter=',', quotechar='"')
for line in verse_reader:
if self.stop_import_flag:
break
try:
line_book = book_list[int(line[0])]
except ValueError:
line_book = line[0]
if book_ptr != line_book:
book = self.get_book(line_book)
book_ptr = book.name
self.wizard.increment_progress_bar(
translate('BiblesPlugin.CSVBible',
'Importing verses from %s...' % book.name, 'Importing verses from <book name>...'))
self.session.commit()
verse_text = line[3]
self.create_verse(book.id, line[1], line[2], verse_text)
self.wizard.increment_progress_bar(translate('BiblesPlugin.CSVBible', 'Importing verses... done.'))
self.application.process_events()
self.session.commit()
except IOError:
log.exception('Loading verses from file failed')
success = False
finally:
if verse_file:
verse_file.close()
if self.stop_import_flag:
return False
else:
return success
def get_file_encoding(filename):
"""
Utility function to get the file encoding.
"""
detect_file = None
try:
detect_file = open(filename, 'rb')
details = chardet.detect(detect_file.read(1024))
except IOError:
log.exception('Error detecting file encoding')
finally:
if detect_file:
detect_file.close()
return details
| gpl-2.0 |
antin/Open-Knesset | suggestions/migrations/0001_initial.py | 14 | 8743 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Suggestion'
db.create_table('suggestions_suggestion', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('suggested_at', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, db_index=True, blank=True)),
('suggested_by', self.gf('django.db.models.fields.related.ForeignKey')(related_name='suggestions', to=orm['auth.User'])),
('comment', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('resolved_at', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('resolved_by', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='resolved_suggestions', null=True, to=orm['auth.User'])),
('resolved_status', self.gf('django.db.models.fields.IntegerField')(default=0, db_index=True)),
))
db.send_create_signal('suggestions', ['Suggestion'])
# Adding model 'SuggestedAction'
db.create_table('suggestions_suggestedaction', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('suggestion', self.gf('django.db.models.fields.related.ForeignKey')(related_name='actions', to=orm['suggestions.Suggestion'])),
('action', self.gf('django.db.models.fields.PositiveIntegerField')()),
('subject_type', self.gf('django.db.models.fields.related.ForeignKey')(related_name='action_subjects', to=orm['contenttypes.ContentType'])),
('subject_id', self.gf('django.db.models.fields.PositiveIntegerField')(null=True, blank=True)),
))
db.send_create_signal('suggestions', ['SuggestedAction'])
# Adding model 'ActionFields'
db.create_table('suggestions_actionfields', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('action', self.gf('django.db.models.fields.related.ForeignKey')(related_name='action_fields', to=orm['suggestions.SuggestedAction'])),
('name', self.gf('django.db.models.fields.CharField')(max_length=50)),
('value', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('value_type', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='action_values', null=True, to=orm['contenttypes.ContentType'])),
('value_id', self.gf('django.db.models.fields.PositiveIntegerField')(null=True, blank=True)),
))
db.send_create_signal('suggestions', ['ActionFields'])
def backwards(self, orm):
# Deleting model 'Suggestion'
db.delete_table('suggestions_suggestion')
# Deleting model 'SuggestedAction'
db.delete_table('suggestions_suggestedaction')
# Deleting model 'ActionFields'
db.delete_table('suggestions_actionfields')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'suggestions.actionfields': {
'Meta': {'object_name': 'ActionFields'},
'action': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'action_fields'", 'to': "orm['suggestions.SuggestedAction']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'value_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'value_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'action_values'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"})
},
'suggestions.suggestedaction': {
'Meta': {'object_name': 'SuggestedAction'},
'action': ('django.db.models.fields.PositiveIntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'subject_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'subject_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'action_subjects'", 'to': "orm['contenttypes.ContentType']"}),
'suggestion': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'actions'", 'to': "orm['suggestions.Suggestion']"})
},
'suggestions.suggestion': {
'Meta': {'object_name': 'Suggestion'},
'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'resolved_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'resolved_suggestions'", 'null': 'True', 'to': "orm['auth.User']"}),
'resolved_status': ('django.db.models.fields.IntegerField', [], {'default': '0', 'db_index': 'True'}),
'suggested_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True', 'blank': 'True'}),
'suggested_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'suggestions'", 'to': "orm['auth.User']"})
}
}
complete_apps = ['suggestions'] | bsd-3-clause |
haveal/googleads-python-lib | examples/dfp/v201505/activity_group_service/create_activity_groups.py | 4 | 2447 | #!/usr/bin/python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example creates new activity groups.
To determine which activity groups exist, run get_all_activity_groups.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
import uuid
# Import appropriate modules from the client library.
from googleads import dfp
# Set the ID of the advertiser company this activity group is associated with.
ADVERTISER_COMPANY_ID = 'INSERT_ADVERTISER_COMPANY_ID_HERE'
def main(client, advertiser_company_id):
# Initialize appropriate service.
activity_group_service = client.GetService('ActivityGroupService',
version='v201505')
# Create a short-term activity group.
short_term_activity_group = {
'name': 'Short-term activity group #%s' % uuid.uuid4(),
'companyIds': [advertiser_company_id],
'clicksLookback': '1',
'impressionsLookback': '1'
}
# Create a long-term activity group.
long_term_activity_group = {
'name': 'Long-term activity group #%s' % uuid.uuid4(),
'companyIds': [advertiser_company_id],
'clicksLookback': '30',
'impressionsLookback': '30'
}
# Create the activity groups on the server.
activity_groups = activity_group_service.createActivityGroups([
short_term_activity_group, long_term_activity_group])
# Display results.
for activity_group in activity_groups:
print ('Activity group with ID \'%s\' and name \'%s\' was created.'
% (activity_group['id'], activity_group['name']))
if __name__ == '__main__':
# Initialize client object.
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client, ADVERTISER_COMPANY_ID)
| apache-2.0 |
jaraco/irc | irc/tests/test_client.py | 1 | 1518 | from unittest import mock
import pytest
import irc.client
def test_version():
assert isinstance(irc._get_version(), str)
@mock.patch('irc.connection.socket')
def test_privmsg_sends_msg(socket_mod):
server = irc.client.Reactor().server()
server.connect('foo', 6667, 'bestnick')
# make sure the mock object doesn't have a write method or it will treat
# it as an SSL connection and never call .send.
del server.socket.write
server.privmsg('#best-channel', 'You are great')
server.socket.send.assert_called_with(b'PRIVMSG #best-channel :You are great\r\n')
@mock.patch('irc.connection.socket')
def test_privmsg_fails_on_embedded_carriage_returns(socket_mod):
server = irc.client.Reactor().server()
server.connect('foo', 6667, 'bestnick')
with pytest.raises(ValueError):
server.privmsg('#best-channel', 'You are great\nSo are you')
class TestHandlers:
def test_handlers_same_priority(self):
"""
Two handlers of the same priority should still compare.
"""
handler1 = irc.client.PrioritizedHandler(1, lambda: None)
handler2 = irc.client.PrioritizedHandler(1, lambda: 'other')
assert not handler1 < handler2
assert not handler2 < handler1
@mock.patch('irc.connection.socket')
def test_command_without_arguments(self, socket_mod):
"A command without arguments should not crash"
server = irc.client.Reactor().server()
server.connect('foo', 6667, 'bestnick')
server._process_line('GLOBALUSERSTATE')
| mit |
wilsonssun/baseball-gamethread | build/lxml/build/lib.linux-i686-2.7/lxml/html/_diffcommand.py | 93 | 2084 | import optparse
import sys
import re
import os
from lxml.html.diff import htmldiff
description = """\
"""
parser = optparse.OptionParser(
usage="%prog [OPTIONS] FILE1 FILE2\n"
"%prog --annotate [OPTIONS] INFO1 FILE1 INFO2 FILE2 ...",
description=description,
)
parser.add_option(
'-o', '--output',
metavar="FILE",
dest="output",
default="-",
help="File to write the difference to",
)
parser.add_option(
'-a', '--annotation',
action="store_true",
dest="annotation",
help="Do an annotation")
def main(args=None):
if args is None:
args = sys.argv[1:]
options, args = parser.parse_args(args)
if options.annotation:
return annotate(options, args)
if len(args) != 2:
print('Error: you must give two files')
parser.print_help()
sys.exit(1)
file1, file2 = args
input1 = read_file(file1)
input2 = read_file(file2)
body1 = split_body(input1)[1]
pre, body2, post = split_body(input2)
result = htmldiff(body1, body2)
result = pre + result + post
if options.output == '-':
if not result.endswith('\n'):
result += '\n'
sys.stdout.write(result)
else:
f = open(options.output, 'wb')
f.write(result)
f.close()
def read_file(filename):
if filename == '-':
c = sys.stdin.read()
elif not os.path.exists(filename):
raise OSError(
"Input file %s does not exist" % filename)
else:
f = open(filename, 'rb')
c = f.read()
f.close()
return c
body_start_re = re.compile(
r"<body.*?>", re.I|re.S)
body_end_re = re.compile(
r"</body.*?>", re.I|re.S)
def split_body(html):
match = body_start_re.search(html)
if match:
pre = html[:match.end()]
html = html[match.end():]
match = body_end_re.search(html)
if match:
post = html[match.start():]
html = html[:match.start()]
return pre, html, post
def annotate(options, args):
print("Not yet implemented")
sys.exit(1)
| bsd-3-clause |
kenshay/ImageScript | Script_Runner/PYTHON/Lib/ctypes/test/test_pointers.py | 19 | 7239 | import unittest, sys
from ctypes import *
import _ctypes_test
ctype_types = [c_byte, c_ubyte, c_short, c_ushort, c_int, c_uint,
c_long, c_ulong, c_longlong, c_ulonglong, c_double, c_float]
python_types = [int, int, int, int, int, int,
int, int, int, int, float, float]
class PointersTestCase(unittest.TestCase):
def test_pointer_crash(self):
class A(POINTER(c_ulong)):
pass
POINTER(c_ulong)(c_ulong(22))
# Pointer can't set contents: has no _type_
self.assertRaises(TypeError, A, c_ulong(33))
def test_pass_pointers(self):
dll = CDLL(_ctypes_test.__file__)
func = dll._testfunc_p_p
if sizeof(c_longlong) == sizeof(c_void_p):
func.restype = c_longlong
else:
func.restype = c_long
i = c_int(12345678)
## func.argtypes = (POINTER(c_int),)
address = func(byref(i))
self.assertEqual(c_int.from_address(address).value, 12345678)
func.restype = POINTER(c_int)
res = func(pointer(i))
self.assertEqual(res.contents.value, 12345678)
self.assertEqual(res[0], 12345678)
def test_change_pointers(self):
dll = CDLL(_ctypes_test.__file__)
func = dll._testfunc_p_p
i = c_int(87654)
func.restype = POINTER(c_int)
func.argtypes = (POINTER(c_int),)
res = func(pointer(i))
self.assertEqual(res[0], 87654)
self.assertEqual(res.contents.value, 87654)
# C code: *res = 54345
res[0] = 54345
self.assertEqual(i.value, 54345)
# C code:
# int x = 12321;
# res = &x
x = c_int(12321)
res.contents = x
self.assertEqual(i.value, 54345)
x.value = -99
self.assertEqual(res.contents.value, -99)
def test_callbacks_with_pointers(self):
# a function type receiving a pointer
PROTOTYPE = CFUNCTYPE(c_int, POINTER(c_int))
self.result = []
def func(arg):
for i in range(10):
## print arg[i],
self.result.append(arg[i])
## print
return 0
callback = PROTOTYPE(func)
dll = CDLL(_ctypes_test.__file__)
# This function expects a function pointer,
# and calls this with an integer pointer as parameter.
# The int pointer points to a table containing the numbers 1..10
doit = dll._testfunc_callback_with_pointer
## i = c_int(42)
## callback(byref(i))
## self.assertEqual(i.value, 84)
doit(callback)
## print self.result
doit(callback)
## print self.result
def test_basics(self):
from operator import delitem
for ct, pt in zip(ctype_types, python_types):
i = ct(42)
p = pointer(i)
## print type(p.contents), ct
self.assertIs(type(p.contents), ct)
# p.contents is the same as p[0]
## print p.contents
## self.assertEqual(p.contents, 42)
## self.assertEqual(p[0], 42)
self.assertRaises(TypeError, delitem, p, 0)
def test_from_address(self):
from array import array
a = array('i', [100, 200, 300, 400, 500])
addr = a.buffer_info()[0]
p = POINTER(POINTER(c_int))
## print dir(p)
## print p.from_address
## print p.from_address(addr)[0][0]
def test_other(self):
class Table(Structure):
_fields_ = [("a", c_int),
("b", c_int),
("c", c_int)]
pt = pointer(Table(1, 2, 3))
self.assertEqual(pt.contents.a, 1)
self.assertEqual(pt.contents.b, 2)
self.assertEqual(pt.contents.c, 3)
pt.contents.c = 33
from ctypes import _pointer_type_cache
del _pointer_type_cache[Table]
def test_basic(self):
p = pointer(c_int(42))
# Although a pointer can be indexed, it has no length
self.assertRaises(TypeError, len, p)
self.assertEqual(p[0], 42)
self.assertEqual(p[0:1], [42])
self.assertEqual(p.contents.value, 42)
def test_charpp(self):
"""Test that a character pointer-to-pointer is correctly passed"""
dll = CDLL(_ctypes_test.__file__)
func = dll._testfunc_c_p_p
func.restype = c_char_p
argv = (c_char_p * 2)()
argc = c_int( 2 )
argv[0] = b'hello'
argv[1] = b'world'
result = func( byref(argc), argv )
self.assertEqual(result, b'world')
def test_bug_1467852(self):
# http://sourceforge.net/tracker/?func=detail&atid=532154&aid=1467852&group_id=71702
x = c_int(5)
dummy = []
for i in range(32000):
dummy.append(c_int(i))
y = c_int(6)
p = pointer(x)
pp = pointer(p)
q = pointer(y)
pp[0] = q # <==
self.assertEqual(p[0], 6)
def test_c_void_p(self):
# http://sourceforge.net/tracker/?func=detail&aid=1518190&group_id=5470&atid=105470
if sizeof(c_void_p) == 4:
self.assertEqual(c_void_p(0xFFFFFFFF).value,
c_void_p(-1).value)
self.assertEqual(c_void_p(0xFFFFFFFFFFFFFFFF).value,
c_void_p(-1).value)
elif sizeof(c_void_p) == 8:
self.assertEqual(c_void_p(0xFFFFFFFF).value,
0xFFFFFFFF)
self.assertEqual(c_void_p(0xFFFFFFFFFFFFFFFF).value,
c_void_p(-1).value)
self.assertEqual(c_void_p(0xFFFFFFFFFFFFFFFFFFFFFFFF).value,
c_void_p(-1).value)
self.assertRaises(TypeError, c_void_p, 3.14) # make sure floats are NOT accepted
self.assertRaises(TypeError, c_void_p, object()) # nor other objects
def test_pointers_bool(self):
# NULL pointers have a boolean False value, non-NULL pointers True.
self.assertEqual(bool(POINTER(c_int)()), False)
self.assertEqual(bool(pointer(c_int())), True)
self.assertEqual(bool(CFUNCTYPE(None)(0)), False)
self.assertEqual(bool(CFUNCTYPE(None)(42)), True)
# COM methods are boolean True:
if sys.platform == "win32":
mth = WINFUNCTYPE(None)(42, "name", (), None)
self.assertEqual(bool(mth), True)
def test_pointer_type_name(self):
LargeNamedType = type('T' * 2 ** 25, (Structure,), {})
self.assertTrue(POINTER(LargeNamedType))
# to not leak references, we must clean _pointer_type_cache
from ctypes import _pointer_type_cache
del _pointer_type_cache[LargeNamedType]
def test_pointer_type_str_name(self):
large_string = 'T' * 2 ** 25
P = POINTER(large_string)
self.assertTrue(P)
# to not leak references, we must clean _pointer_type_cache
from ctypes import _pointer_type_cache
del _pointer_type_cache[id(P)]
def test_abstract(self):
from ctypes import _Pointer
self.assertRaises(TypeError, _Pointer.set_type, 42)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
arubdesu/EAs | priv_helpers.py | 2 | 1327 | #!/usr/bin/python
"""Given a whitelist of privHelpers from legit software, report on stuff to investigate."""
import os
import glob
def main():
"""gimme some main"""
allowed = ["com.barebones.authd",
"com.bombich.ccc",
"com.bombich.ccchelper",
"com.box.sync.bootstrapper",
"com.box.sync.iconhelper",
"com.github.GitHub.GHInstallCLI",# old?
"com.logmein.join.me.update-helper",
"com.macromates.auth_server",# old?
"com.microsoft.office.licensing.helper",
"com.microsoft.office.licensingV2.helper",
"com.oracle.java.JavaUpdateHelper",
"com.tunnelbear.mac.tbeard",
"com.teamviewer.Helper",
"fr.whitebox.packages",
"Google Drive Icon Helper"]# srsly, Google?
found_privs = glob.glob('/Library/PrivilegedHelperTools/*')
to_investigate = []
for priv in found_privs:
if os.path.basename(priv) not in allowed:
to_investigate.append(priv)
if to_investigate:
result = "Not in whitelist, investigate: \n" + "\n".join(*[to_investigate])
else:
result = "Nothing to see here."
print "<result>%s</result>" % result
if __name__ == '__main__':
main()
| apache-2.0 |
justinpotts/mozillians | vendor-local/lib/python/unidecode/x09e.py | 252 | 4615 | data = (
'Shu ', # 0x00
'Luo ', # 0x01
'Qi ', # 0x02
'Yi ', # 0x03
'Ji ', # 0x04
'Zhe ', # 0x05
'Yu ', # 0x06
'Zhan ', # 0x07
'Ye ', # 0x08
'Yang ', # 0x09
'Pi ', # 0x0a
'Ning ', # 0x0b
'Huo ', # 0x0c
'Mi ', # 0x0d
'Ying ', # 0x0e
'Meng ', # 0x0f
'Di ', # 0x10
'Yue ', # 0x11
'Yu ', # 0x12
'Lei ', # 0x13
'Bao ', # 0x14
'Lu ', # 0x15
'He ', # 0x16
'Long ', # 0x17
'Shuang ', # 0x18
'Yue ', # 0x19
'Ying ', # 0x1a
'Guan ', # 0x1b
'Qu ', # 0x1c
'Li ', # 0x1d
'Luan ', # 0x1e
'Niao ', # 0x1f
'Jiu ', # 0x20
'Ji ', # 0x21
'Yuan ', # 0x22
'Ming ', # 0x23
'Shi ', # 0x24
'Ou ', # 0x25
'Ya ', # 0x26
'Cang ', # 0x27
'Bao ', # 0x28
'Zhen ', # 0x29
'Gu ', # 0x2a
'Dong ', # 0x2b
'Lu ', # 0x2c
'Ya ', # 0x2d
'Xiao ', # 0x2e
'Yang ', # 0x2f
'Ling ', # 0x30
'Zhi ', # 0x31
'Qu ', # 0x32
'Yuan ', # 0x33
'Xue ', # 0x34
'Tuo ', # 0x35
'Si ', # 0x36
'Zhi ', # 0x37
'Er ', # 0x38
'Gua ', # 0x39
'Xiu ', # 0x3a
'Heng ', # 0x3b
'Zhou ', # 0x3c
'Ge ', # 0x3d
'Luan ', # 0x3e
'Hong ', # 0x3f
'Wu ', # 0x40
'Bo ', # 0x41
'Li ', # 0x42
'Juan ', # 0x43
'Hu ', # 0x44
'E ', # 0x45
'Yu ', # 0x46
'Xian ', # 0x47
'Ti ', # 0x48
'Wu ', # 0x49
'Que ', # 0x4a
'Miao ', # 0x4b
'An ', # 0x4c
'Kun ', # 0x4d
'Bei ', # 0x4e
'Peng ', # 0x4f
'Qian ', # 0x50
'Chun ', # 0x51
'Geng ', # 0x52
'Yuan ', # 0x53
'Su ', # 0x54
'Hu ', # 0x55
'He ', # 0x56
'E ', # 0x57
'Gu ', # 0x58
'Qiu ', # 0x59
'Zi ', # 0x5a
'Mei ', # 0x5b
'Mu ', # 0x5c
'Ni ', # 0x5d
'Yao ', # 0x5e
'Weng ', # 0x5f
'Liu ', # 0x60
'Ji ', # 0x61
'Ni ', # 0x62
'Jian ', # 0x63
'He ', # 0x64
'Yi ', # 0x65
'Ying ', # 0x66
'Zhe ', # 0x67
'Liao ', # 0x68
'Liao ', # 0x69
'Jiao ', # 0x6a
'Jiu ', # 0x6b
'Yu ', # 0x6c
'Lu ', # 0x6d
'Xuan ', # 0x6e
'Zhan ', # 0x6f
'Ying ', # 0x70
'Huo ', # 0x71
'Meng ', # 0x72
'Guan ', # 0x73
'Shuang ', # 0x74
'Lu ', # 0x75
'Jin ', # 0x76
'Ling ', # 0x77
'Jian ', # 0x78
'Xian ', # 0x79
'Cuo ', # 0x7a
'Jian ', # 0x7b
'Jian ', # 0x7c
'Yan ', # 0x7d
'Cuo ', # 0x7e
'Lu ', # 0x7f
'You ', # 0x80
'Cu ', # 0x81
'Ji ', # 0x82
'Biao ', # 0x83
'Cu ', # 0x84
'Biao ', # 0x85
'Zhu ', # 0x86
'Jun ', # 0x87
'Zhu ', # 0x88
'Jian ', # 0x89
'Mi ', # 0x8a
'Mi ', # 0x8b
'Wu ', # 0x8c
'Liu ', # 0x8d
'Chen ', # 0x8e
'Jun ', # 0x8f
'Lin ', # 0x90
'Ni ', # 0x91
'Qi ', # 0x92
'Lu ', # 0x93
'Jiu ', # 0x94
'Jun ', # 0x95
'Jing ', # 0x96
'Li ', # 0x97
'Xiang ', # 0x98
'Yan ', # 0x99
'Jia ', # 0x9a
'Mi ', # 0x9b
'Li ', # 0x9c
'She ', # 0x9d
'Zhang ', # 0x9e
'Lin ', # 0x9f
'Jing ', # 0xa0
'Ji ', # 0xa1
'Ling ', # 0xa2
'Yan ', # 0xa3
'Cu ', # 0xa4
'Mai ', # 0xa5
'Mai ', # 0xa6
'Ge ', # 0xa7
'Chao ', # 0xa8
'Fu ', # 0xa9
'Mian ', # 0xaa
'Mian ', # 0xab
'Fu ', # 0xac
'Pao ', # 0xad
'Qu ', # 0xae
'Qu ', # 0xaf
'Mou ', # 0xb0
'Fu ', # 0xb1
'Xian ', # 0xb2
'Lai ', # 0xb3
'Qu ', # 0xb4
'Mian ', # 0xb5
'[?] ', # 0xb6
'Feng ', # 0xb7
'Fu ', # 0xb8
'Qu ', # 0xb9
'Mian ', # 0xba
'Ma ', # 0xbb
'Mo ', # 0xbc
'Mo ', # 0xbd
'Hui ', # 0xbe
'Ma ', # 0xbf
'Zou ', # 0xc0
'Nen ', # 0xc1
'Fen ', # 0xc2
'Huang ', # 0xc3
'Huang ', # 0xc4
'Jin ', # 0xc5
'Guang ', # 0xc6
'Tian ', # 0xc7
'Tou ', # 0xc8
'Heng ', # 0xc9
'Xi ', # 0xca
'Kuang ', # 0xcb
'Heng ', # 0xcc
'Shu ', # 0xcd
'Li ', # 0xce
'Nian ', # 0xcf
'Chi ', # 0xd0
'Hei ', # 0xd1
'Hei ', # 0xd2
'Yi ', # 0xd3
'Qian ', # 0xd4
'Dan ', # 0xd5
'Xi ', # 0xd6
'Tuan ', # 0xd7
'Mo ', # 0xd8
'Mo ', # 0xd9
'Qian ', # 0xda
'Dai ', # 0xdb
'Chu ', # 0xdc
'You ', # 0xdd
'Dian ', # 0xde
'Yi ', # 0xdf
'Xia ', # 0xe0
'Yan ', # 0xe1
'Qu ', # 0xe2
'Mei ', # 0xe3
'Yan ', # 0xe4
'Jing ', # 0xe5
'Yu ', # 0xe6
'Li ', # 0xe7
'Dang ', # 0xe8
'Du ', # 0xe9
'Can ', # 0xea
'Yin ', # 0xeb
'An ', # 0xec
'Yan ', # 0xed
'Tan ', # 0xee
'An ', # 0xef
'Zhen ', # 0xf0
'Dai ', # 0xf1
'Can ', # 0xf2
'Yi ', # 0xf3
'Mei ', # 0xf4
'Dan ', # 0xf5
'Yan ', # 0xf6
'Du ', # 0xf7
'Lu ', # 0xf8
'Zhi ', # 0xf9
'Fen ', # 0xfa
'Fu ', # 0xfb
'Fu ', # 0xfc
'Min ', # 0xfd
'Min ', # 0xfe
'Yuan ', # 0xff
)
| bsd-3-clause |
yongshengwang/hue | build/env/lib/python2.7/site-packages/boto-2.38.0-py2.7.egg/boto/cloudsearch/layer2.py | 153 | 3010 | # Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.cloudsearch.layer1 import Layer1
from boto.cloudsearch.domain import Domain
class Layer2(object):
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=True, port=None, proxy=None, proxy_port=None,
host=None, debug=0, session_token=None, region=None,
validate_certs=True):
self.layer1 = Layer1(
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
is_secure=is_secure,
port=port,
proxy=proxy,
proxy_port=proxy_port,
host=host,
debug=debug,
security_token=session_token,
region=region,
validate_certs=validate_certs)
def list_domains(self, domain_names=None):
"""
Return a list of :class:`boto.cloudsearch.domain.Domain`
objects for each domain defined in the current account.
"""
domain_data = self.layer1.describe_domains(domain_names)
return [Domain(self.layer1, data) for data in domain_data]
def create_domain(self, domain_name):
"""
Create a new CloudSearch domain and return the corresponding
:class:`boto.cloudsearch.domain.Domain` object.
"""
data = self.layer1.create_domain(domain_name)
return Domain(self.layer1, data)
def lookup(self, domain_name):
"""
Lookup a single domain
:param domain_name: The name of the domain to look up
:type domain_name: str
:return: Domain object, or None if the domain isn't found
:rtype: :class:`boto.cloudsearch.domain.Domain`
"""
domains = self.list_domains(domain_names=[domain_name])
if len(domains) > 0:
return domains[0]
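# Example usage (sketch; the credentials and domain name are placeholders):
#
#     layer2 = Layer2(aws_access_key_id='...', aws_secret_access_key='...')
#     domain = layer2.lookup('my-search-domain')
#     if domain is None:
#         domain = layer2.create_domain('my-search-domain')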
| apache-2.0 |
Pluto-tv/chromium-crosswalk | tools/telemetry/third_party/gsutilz/third_party/boto/boto/machinelearning/exceptions.py | 127 | 1596 | # Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.exception import BotoServerError
class InternalServerException(BotoServerError):
pass
class LimitExceededException(BotoServerError):
pass
class IdempotentParameterMismatchException(BotoServerError):
pass
class ResourceInUseException(BotoServerError):
pass
class ResourceNotFoundException(BotoServerError):
pass
class PredictorNotMountedException(BotoServerError):
pass
class InvalidInputException(BotoServerError):
pass
| bsd-3-clause |
ltiao/networkx | networkx/algorithms/flow/shortestaugmentingpath.py | 56 | 10555 | # -*- coding: utf-8 -*-
"""
Shortest augmenting path algorithm for maximum flow problems.
"""
__author__ = """ysitu <[email protected]>"""
# Copyright (C) 2014 ysitu <[email protected]>
# All rights reserved.
# BSD license.
from collections import deque
import networkx as nx
from .utils import *
from .edmondskarp import edmonds_karp_core
__all__ = ['shortest_augmenting_path']
def shortest_augmenting_path_impl(G, s, t, capacity, residual, two_phase,
cutoff):
"""Implementation of the shortest augmenting path algorithm.
"""
if s not in G:
raise nx.NetworkXError('node %s not in graph' % str(s))
if t not in G:
raise nx.NetworkXError('node %s not in graph' % str(t))
if s == t:
raise nx.NetworkXError('source and sink are the same node')
if residual is None:
R = build_residual_network(G, capacity)
else:
R = residual
R_node = R.node
R_pred = R.pred
R_succ = R.succ
# Initialize/reset the residual network.
for u in R:
for e in R_succ[u].values():
e['flow'] = 0
# Initialize heights of the nodes.
heights = {t: 0}
q = deque([(t, 0)])
while q:
u, height = q.popleft()
height += 1
for v, attr in R_pred[u].items():
if v not in heights and attr['flow'] < attr['capacity']:
heights[v] = height
q.append((v, height))
if s not in heights:
# t is not reachable from s in the residual network. The maximum flow
# must be zero.
R.graph['flow_value'] = 0
return R
n = len(G)
m = R.size() / 2
# Initialize heights and 'current edge' data structures of the nodes.
for u in R:
R_node[u]['height'] = heights[u] if u in heights else n
R_node[u]['curr_edge'] = CurrentEdge(R_succ[u])
# Initialize counts of nodes in each level.
counts = [0] * (2 * n - 1)
for u in R:
counts[R_node[u]['height']] += 1
inf = R.graph['inf']
def augment(path):
"""Augment flow along a path from s to t.
"""
# Determine the path residual capacity.
flow = inf
it = iter(path)
u = next(it)
for v in it:
attr = R_succ[u][v]
flow = min(flow, attr['capacity'] - attr['flow'])
u = v
if flow * 2 > inf:
raise nx.NetworkXUnbounded(
'Infinite capacity path, flow unbounded above.')
# Augment flow along the path.
it = iter(path)
u = next(it)
for v in it:
R_succ[u][v]['flow'] += flow
R_succ[v][u]['flow'] -= flow
u = v
return flow
def relabel(u):
"""Relabel a node to create an admissible edge.
"""
height = n - 1
for v, attr in R_succ[u].items():
if attr['flow'] < attr['capacity']:
height = min(height, R_node[v]['height'])
return height + 1
if cutoff is None:
cutoff = float('inf')
# Phase 1: Look for shortest augmenting paths using depth-first search.
flow_value = 0
path = [s]
u = s
d = n if not two_phase else int(min(m ** 0.5, 2 * n ** (2. / 3)))
done = R_node[s]['height'] >= d
while not done:
height = R_node[u]['height']
curr_edge = R_node[u]['curr_edge']
# Depth-first search for the next node on the path to t.
while True:
v, attr = curr_edge.get()
if (height == R_node[v]['height'] + 1 and
attr['flow'] < attr['capacity']):
# Advance to the next node following an admissible edge.
path.append(v)
u = v
break
try:
curr_edge.move_to_next()
except StopIteration:
counts[height] -= 1
if counts[height] == 0:
# Gap heuristic: If relabeling causes a level to become
# empty, a minimum cut has been identified. The algorithm
# can now be terminated.
R.graph['flow_value'] = flow_value
return R
height = relabel(u)
if u == s and height >= d:
if not two_phase:
# t is disconnected from s in the residual network. No
# more augmenting paths exist.
R.graph['flow_value'] = flow_value
return R
else:
# t is at least d steps away from s. End of phase 1.
done = True
break
counts[height] += 1
R_node[u]['height'] = height
if u != s:
# After relabeling, the last edge on the path is no longer
# admissible. Retreat one step to look for an alternative.
path.pop()
u = path[-1]
break
if u == t:
# t is reached. Augment flow along the path and reset it for a new
# depth-first search.
flow_value += augment(path)
if flow_value >= cutoff:
R.graph['flow_value'] = flow_value
return R
path = [s]
u = s
# Phase 2: Look for shortest augmenting paths using breadth-first search.
flow_value += edmonds_karp_core(R, s, t, cutoff - flow_value)
R.graph['flow_value'] = flow_value
return R
def shortest_augmenting_path(G, s, t, capacity='capacity', residual=None,
value_only=False, two_phase=False, cutoff=None):
"""Find a maximum single-commodity flow using the shortest augmenting path
algorithm.
This function returns the residual network resulting after computing
the maximum flow. See below for details about the conventions
NetworkX uses for defining residual networks.
This algorithm has a running time of `O(n^2 m)` for `n` nodes and `m`
edges.
Parameters
----------
G : NetworkX graph
Edges of the graph are expected to have an attribute called
'capacity'. If this attribute is not present, the edge is
considered to have infinite capacity.
s : node
Source node for the flow.
t : node
Sink node for the flow.
capacity : string
Edges of the graph G are expected to have an attribute capacity
that indicates how much flow the edge can support. If this
attribute is not present, the edge is considered to have
infinite capacity. Default value: 'capacity'.
residual : NetworkX graph
Residual network on which the algorithm is to be executed. If None, a
new residual network is created. Default value: None.
value_only : bool
If True compute only the value of the maximum flow. This parameter
will be ignored by this algorithm because it is not applicable.
two_phase : bool
If True, a two-phase variant is used. The two-phase variant improves
the running time on unit-capacity networks from `O(nm)` to
`O(\min(n^{2/3}, m^{1/2}) m)`. Default value: False.
cutoff : integer, float
If specified, the algorithm will terminate when the flow value reaches
or exceeds the cutoff. In this case, it may be unable to immediately
determine a minimum cut. Default value: None.
Returns
-------
R : NetworkX DiGraph
Residual network after computing the maximum flow.
Raises
------
NetworkXError
The algorithm does not support MultiGraph and MultiDiGraph. If
the input graph is an instance of one of these two classes, a
NetworkXError is raised.
NetworkXUnbounded
If the graph has a path of infinite capacity, the value of a
feasible flow on the graph is unbounded above and the function
raises a NetworkXUnbounded.
See also
--------
:meth:`maximum_flow`
:meth:`minimum_cut`
:meth:`edmonds_karp`
:meth:`preflow_push`
Notes
-----
The residual network :samp:`R` from an input graph :samp:`G` has the
same nodes as :samp:`G`. :samp:`R` is a DiGraph that contains a pair
of edges :samp:`(u, v)` and :samp:`(v, u)` iff :samp:`(u, v)` is not a
self-loop, and at least one of :samp:`(u, v)` and :samp:`(v, u)` exists
in :samp:`G`.
For each edge :samp:`(u, v)` in :samp:`R`, :samp:`R[u][v]['capacity']`
is equal to the capacity of :samp:`(u, v)` in :samp:`G` if it exists
in :samp:`G` or zero otherwise. If the capacity is infinite,
:samp:`R[u][v]['capacity']` will have a high arbitrary finite value
that does not affect the solution of the problem. This value is stored in
:samp:`R.graph['inf']`. For each edge :samp:`(u, v)` in :samp:`R`,
:samp:`R[u][v]['flow']` represents the flow function of :samp:`(u, v)` and
satisfies :samp:`R[u][v]['flow'] == -R[v][u]['flow']`.
The flow value, defined as the total flow into :samp:`t`, the sink, is
stored in :samp:`R.graph['flow_value']`. If :samp:`cutoff` is not
specified, reachability to :samp:`t` using only edges :samp:`(u, v)` such
that :samp:`R[u][v]['flow'] < R[u][v]['capacity']` induces a minimum
:samp:`s`-:samp:`t` cut.
Examples
--------
>>> import networkx as nx
>>> from networkx.algorithms.flow import shortest_augmenting_path
The functions that implement flow algorithms and output a residual
network, such as this one, are not imported to the base NetworkX
namespace, so you have to explicitly import them from the flow package.
>>> G = nx.DiGraph()
>>> G.add_edge('x','a', capacity=3.0)
>>> G.add_edge('x','b', capacity=1.0)
>>> G.add_edge('a','c', capacity=3.0)
>>> G.add_edge('b','c', capacity=5.0)
>>> G.add_edge('b','d', capacity=4.0)
>>> G.add_edge('d','e', capacity=2.0)
>>> G.add_edge('c','y', capacity=2.0)
>>> G.add_edge('e','y', capacity=3.0)
>>> R = shortest_augmenting_path(G, 'x', 'y')
>>> flow_value = nx.maximum_flow_value(G, 'x', 'y')
>>> flow_value
3.0
>>> flow_value == R.graph['flow_value']
True
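    A sketch of how a minimum cut could be read off the residual network when
    no cutoff was used (this traversal is illustrative, not part of the API):
    nodes reachable from the source over non-saturated edges form the source
    side of the cut.
    >>> source_side = {'x'}
    >>> stack = ['x']
    >>> while stack:
    ...     u = stack.pop()
    ...     for v, attr in R[u].items():
    ...         if v not in source_side and attr['flow'] < attr['capacity']:
    ...             source_side.add(v)
    ...             stack.append(v)
    >>> sorted(source_side)
    ['a', 'c', 'x']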
"""
R = shortest_augmenting_path_impl(G, s, t, capacity, residual, two_phase,
cutoff)
R.graph['algorithm'] = 'shortest_augmenting_path'
return R
| bsd-3-clause |
mou4e/zirconium | chrome/test/chromedriver/test/webserver.py | 68 | 6705 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import BaseHTTPServer
import os
import ssl
import threading
class Responder(object):
"""Sends a HTTP response. Used with TestWebServer."""
def __init__(self, handler):
self._handler = handler
def SendResponse(self, body):
"""Sends OK response with body."""
self.SendHeaders(len(body))
self.SendBody(body)
def SendResponseFromFile(self, path):
"""Sends OK response with the given file as the body."""
with open(path, 'r') as f:
self.SendResponse(f.read())
def SendHeaders(self, content_length=None):
"""Sends headers for OK response."""
self._handler.send_response(200)
if content_length:
self._handler.send_header('Content-Length', content_length)
self._handler.end_headers()
def SendError(self, code):
"""Sends response for the given HTTP error code."""
self._handler.send_error(code)
def SendBody(self, body):
"""Just sends the body, no headers."""
self._handler.wfile.write(body)
class Request(object):
"""An HTTP request."""
def __init__(self, handler):
self._handler = handler
def GetPath(self):
return self._handler.path
def GetHeader(self, name):
return self._handler.headers.getheader(name)
class _BaseServer(BaseHTTPServer.HTTPServer):
"""Internal server that throws if timed out waiting for a request."""
def __init__(self, on_request, server_cert_and_key_path=None):
"""Starts the server.
It is an HTTP server if parameter server_cert_and_key_path is not provided.
Otherwise, it is an HTTPS server.
Args:
server_cert_and_key_path: path to a PEM file containing the cert and key.
if it is None, start the server as an HTTP one.
"""
class _Handler(BaseHTTPServer.BaseHTTPRequestHandler):
"""Internal handler that just asks the server to handle the request."""
def do_GET(self):
if self.path.endswith('favicon.ico'):
self.send_error(404)
return
on_request(Request(self), Responder(self))
def log_message(self, *args, **kwargs):
"""Overriddes base class method to disable logging."""
pass
BaseHTTPServer.HTTPServer.__init__(self, ('127.0.0.1', 0), _Handler)
if server_cert_and_key_path is not None:
self._is_https_enabled = True
      self.socket = ssl.wrap_socket(
          self.socket, certfile=server_cert_and_key_path,
          server_side=True)
else:
self._is_https_enabled = False
def handle_timeout(self):
"""Overridden from SocketServer."""
raise RuntimeError('Timed out waiting for http request')
def GetUrl(self):
"""Returns the base URL of the server."""
postfix = '://127.0.0.1:%s' % self.server_port
if self._is_https_enabled:
return 'https' + postfix
return 'http' + postfix
class WebServer(object):
"""An HTTP or HTTPS server that serves on its own thread.
Serves files from given directory but may use custom data for specific paths.
"""
def __init__(self, root_dir, server_cert_and_key_path=None):
"""Starts the server.
It is an HTTP server if parameter server_cert_and_key_path is not provided.
Otherwise, it is an HTTPS server.
Args:
root_dir: root path to serve files from. This parameter is required.
server_cert_and_key_path: path to a PEM file containing the cert and key.
if it is None, start the server as an HTTP one.
"""
self._root_dir = os.path.abspath(root_dir)
self._server = _BaseServer(self._OnRequest, server_cert_and_key_path)
self._thread = threading.Thread(target=self._server.serve_forever)
self._thread.daemon = True
self._thread.start()
self._path_data_map = {}
self._path_callback_map = {}
self._path_maps_lock = threading.Lock()
def _OnRequest(self, request, responder):
path = request.GetPath().split('?')[0]
# Serve from path -> callback and data maps.
self._path_maps_lock.acquire()
try:
if path in self._path_callback_map:
body = self._path_callback_map[path](request)
if body:
responder.SendResponse(body)
else:
responder.SendError(503)
return
if path in self._path_data_map:
responder.SendResponse(self._path_data_map[path])
return
finally:
self._path_maps_lock.release()
# Serve from file.
path = os.path.normpath(
os.path.join(self._root_dir, *path.split('/')))
if not path.startswith(self._root_dir):
responder.SendError(403)
return
if not os.path.exists(path):
responder.SendError(404)
return
responder.SendResponseFromFile(path)
def SetDataForPath(self, path, data):
self._path_maps_lock.acquire()
try:
self._path_data_map[path] = data
finally:
self._path_maps_lock.release()
def SetCallbackForPath(self, path, func):
self._path_maps_lock.acquire()
try:
self._path_callback_map[path] = func
finally:
self._path_maps_lock.release()
def GetUrl(self):
"""Returns the base URL of the server."""
return self._server.GetUrl()
def Shutdown(self):
"""Shuts down the server synchronously."""
self._server.shutdown()
self._thread.join()
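# A rough usage sketch for WebServer (the root directory, path and urllib2 call
# below are illustrative assumptions, not part of this module):
#   server = WebServer('/path/to/test/data')
#   server.SetDataForPath('/hello', 'hello world')
#   body = urllib2.urlopen(server.GetUrl() + '/hello').read()  # 'hello world'
#   server.Shutdown()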
class SyncWebServer(object):
"""WebServer for testing.
Incoming requests are blocked until explicitly handled.
This was designed for single thread use. All requests should be handled on
the same thread.
"""
def __init__(self):
self._server = _BaseServer(self._OnRequest)
# Recognized by SocketServer.
self._server.timeout = 10
self._on_request = None
def _OnRequest(self, request, responder):
self._on_request(responder)
self._on_request = None
def Respond(self, on_request):
"""Blocks until request comes in, then calls given handler function.
Args:
on_request: Function that handles the request. Invoked with single
parameter, an instance of Responder.
"""
if self._on_request:
raise RuntimeError('Must handle 1 request at a time.')
self._on_request = on_request
while self._on_request:
# Don't use handle_one_request, because it won't work with the timeout.
self._server.handle_request()
def RespondWithContent(self, content):
"""Blocks until request comes in, then handles it with the given content."""
def SendContent(responder):
responder.SendResponse(content)
self.Respond(SendContent)
def GetUrl(self):
return self._server.GetUrl()
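# SyncWebServer usage sketch (illustrative; the request is expected to arrive
# from another thread, e.g. the browser under test):
#   sync_server = SyncWebServer()
#   url = sync_server.GetUrl()        # hand this URL to the other thread
#   sync_server.RespondWithContent('<html>done</html>')  # blocks until a request arrives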
| bsd-3-clause |
pgmillon/ansible | test/sanity/pylint/plugins/deprecated.py | 13 | 3853 | # (c) 2018, Matt Martz <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from distutils.version import LooseVersion
import astroid
from pylint.interfaces import IAstroidChecker
from pylint.checkers import BaseChecker
from pylint.checkers.utils import check_messages
from ansible.release import __version__ as ansible_version_raw
MSGS = {
'E9501': ("Deprecated version (%r) found in call to Display.deprecated "
"or AnsibleModule.deprecate",
"ansible-deprecated-version",
"Used when a call to Display.deprecated specifies a version "
"less than or equal to the current version of Ansible",
{'minversion': (2, 6)}),
'E9502': ("Display.deprecated call without a version",
"ansible-deprecated-no-version",
"Used when a call to Display.deprecated does not specify a "
"version",
{'minversion': (2, 6)}),
'E9503': ("Invalid deprecated version (%r) found in call to "
"Display.deprecated or AnsibleModule.deprecate",
"ansible-invalid-deprecated-version",
"Used when a call to Display.deprecated specifies an invalid "
"version number",
{'minversion': (2, 6)}),
}
ANSIBLE_VERSION = LooseVersion('.'.join(ansible_version_raw.split('.')[:3]))
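# For example (hypothetical version strings): if ansible_version_raw were
# '2.9.0.dev0', ANSIBLE_VERSION becomes LooseVersion('2.9.0'), so a call like
# display.deprecated(msg, version='2.8') is flagged as ansible-deprecated-version
# while version='2.10' passes.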
def _get_expr_name(node):
"""Funciton to get either ``attrname`` or ``name`` from ``node.func.expr``
Created specifically for the case of ``display.deprecated`` or ``self._display.deprecated``
"""
try:
return node.func.expr.attrname
except AttributeError:
# If this fails too, we'll let it raise, the caller should catch it
return node.func.expr.name
class AnsibleDeprecatedChecker(BaseChecker):
"""Checks for Display.deprecated calls to ensure that the ``version``
has not passed or met the time for removal
"""
__implements__ = (IAstroidChecker,)
name = 'deprecated'
msgs = MSGS
@check_messages(*(MSGS.keys()))
def visit_call(self, node):
version = None
try:
if (node.func.attrname == 'deprecated' and 'display' in _get_expr_name(node) or
node.func.attrname == 'deprecate' and 'module' in _get_expr_name(node)):
if node.keywords:
for keyword in node.keywords:
if len(node.keywords) == 1 and keyword.arg is None:
# This is likely a **kwargs splat
return
elif keyword.arg == 'version':
if isinstance(keyword.value.value, astroid.Name):
# This is likely a variable
return
version = keyword.value.value
if not version:
try:
version = node.args[1].value
except IndexError:
self.add_message('ansible-deprecated-no-version', node=node)
return
try:
if ANSIBLE_VERSION >= LooseVersion(str(version)):
self.add_message('ansible-deprecated-version', node=node, args=(version,))
except ValueError:
self.add_message('ansible-invalid-deprecated-version', node=node, args=(version,))
except AttributeError:
# Not the type of node we are interested in
pass
def register(linter):
"""required method to auto register this checker """
linter.register_checker(AnsibleDeprecatedChecker(linter))
| gpl-3.0 |
joelddiaz/openshift-tools | ansible/roles/lib_gcloud/build/src/gcloud_compute_label.py | 10 | 5323 | # pylint: skip-file
# pylint: disable=too-many-instance-attributes
class GcloudComputeLabel(GcloudCLI):
''' Class to wrap the gcloud compute images command'''
# pylint allows 5
# pylint: disable=too-many-arguments
def __init__(self,
project,
zone,
labels,
name=None,
verbose=False):
''' Constructor for gcloud resource '''
super(GcloudComputeLabel, self).__init__(None, project)
self.zone = zone
self.labels = labels
self.verbose = verbose
self.name = name
self.existing_labels = {}
self.existing_metadata = None
    # gcp returns labels as a list of [{'key': 'key_value', 'value': 'value_value'}, ...],
    # which is hard to work with. this will create one big dict of them
def gcp_labels_to_dict(self, label_list):
''' let's make a dict out of the labels that GCP returns '''
# Moving the {"key" : "key_value", "value" : "value_value" }
# to { "key_value" : "value_value"
for i in label_list:
self.existing_labels[i['key']] = i['value']
def get_labels(self):
''' get a list of labels '''
results = self._list_metadata('instances', self.name, self.zone)
if results['returncode'] == 0:
self.existing_metadata = yaml.load(results['results'])
self.gcp_labels_to_dict(self.existing_metadata['metadata']['items'])
results['instance_metadata'] = self.existing_metadata
results['instance_labels'] = self.existing_labels
results.pop('results', None)
# Set zone if not already set
if not self.zone:
self.zone = self.existing_metadata['zone'].split('/')[-1]
print self.zone
return results
def delete_labels(self):
''' remove labels from a disk '''
label_keys_to_be_deleted = []
for i in self.labels.keys():
if i in self.existing_labels:
label_keys_to_be_deleted.append(i)
if label_keys_to_be_deleted:
results = self._delete_metadata('instances', label_keys_to_be_deleted, False, self.name, self.zone)
self.get_labels()
results['instance_labels'] = self.existing_labels
return results
else:
return {'no_deletes_needed' : True, 'instance_labels' : self.existing_labels}
def create_labels(self, labels=None):
'''set the labels for a disk'''
labels_to_create = {}
for i in self.labels.keys():
if i in self.existing_labels:
if self.labels[i] != self.existing_labels[i]:
labels_to_create[i] = self.labels[i]
else:
labels_to_create[i] = self.labels[i]
if labels_to_create:
results = self._create_metadata('instances', labels_to_create, name=self.name, zone=self.zone)
self.get_labels()
results['instance_labels'] = self.existing_labels
return results
else:
return {'no_creates_needed' : True, 'instance_labels' : self.existing_labels}
# pylint: disable=too-many-return-statements
@staticmethod
def run_ansible(params, check_mode):
''' run the ansible code '''
compute_labels = GcloudComputeLabel(params['project'],
params['zone'],
params['labels'],
params['name'],
)
state = params['state']
api_rval = compute_labels.get_labels()
#####
# Get
#####
if state == 'list':
if api_rval['returncode'] != 0:
return {'failed': True, 'msg' : api_rval, 'state' : state}
return {'changed' : False, 'results' : api_rval, 'state' : state}
########
# Delete
########
if state == 'absent':
api_rval = compute_labels.delete_labels()
if check_mode:
return {'changed': False, 'msg': 'Would have performed a delete.'}
if 'returncode' in api_rval and api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval, 'state': state}
if "no_deletes_needed" in api_rval:
return {'changed': False, 'state': "absent", 'results': api_rval}
return {'changed': True, 'results': api_rval, 'state': state}
########
# Create
########
if state == 'present':
api_rval = compute_labels.create_labels()
if check_mode:
return {'changed': False, 'msg': 'Would have performed a create.'}
if 'returncode' in api_rval and api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval, 'state': state}
if "no_creates_needed" in api_rval:
return {'changed': False, 'state': "present", 'results': api_rval}
return {'changed': True, 'results': api_rval, 'state': state}
return {'failed': True, 'changed': False, 'msg': 'Unknown state passed. %s' % state, 'state' : "unknown"}
| apache-2.0 |
BT-ojossen/odoo | addons/base_report_designer/plugin/openerp_report_designer/bin/script/LoginTest.py | 384 | 1320 | #########################################################################
#
# Copyright (c) 2003-2004 Danny Brewer [email protected]
# Copyright (C) 2004-2010 OpenERP SA (<http://openerp.com>).
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# See: http://www.gnu.org/licenses/lgpl.html
#
#############################################################################
if __name__<>"package":
from ServerParameter import *
from lib.gui import *
class LoginTest:
def __init__(self):
if not loginstatus:
Change(None)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
creasyw/IMTAphy | documentation/doctools/branches/0.4.x/tests/etree13/ElementTree.py | 4 | 51433 | #
# ElementTree
# $Id: ElementTree.py 65372 2008-08-01 19:11:22Z georg.brandl $
#
# light-weight XML support for Python 2.2 and later.
#
# history:
# 2001-10-20 fl created (from various sources)
# 2001-11-01 fl return root from parse method
# 2002-02-16 fl sort attributes in lexical order
# 2002-04-06 fl TreeBuilder refactoring, added PythonDoc markup
# 2002-05-01 fl finished TreeBuilder refactoring
# 2002-07-14 fl added basic namespace support to ElementTree.write
# 2002-07-25 fl added QName attribute support
# 2002-10-20 fl fixed encoding in write
# 2002-11-24 fl changed default encoding to ascii; fixed attribute encoding
# 2002-11-27 fl accept file objects or file names for parse/write
# 2002-12-04 fl moved XMLTreeBuilder back to this module
# 2003-01-11 fl fixed entity encoding glitch for us-ascii
# 2003-02-13 fl added XML literal factory
# 2003-02-21 fl added ProcessingInstruction/PI factory
# 2003-05-11 fl added tostring/fromstring helpers
# 2003-05-26 fl added ElementPath support
# 2003-07-05 fl added makeelement factory method
# 2003-07-28 fl added more well-known namespace prefixes
# 2003-08-15 fl fixed typo in ElementTree.findtext (Thomas Dartsch)
# 2003-09-04 fl fall back on emulator if ElementPath is not installed
# 2003-10-31 fl markup updates
# 2003-11-15 fl fixed nested namespace bug
# 2004-03-28 fl added XMLID helper
# 2004-06-02 fl added default support to findtext
# 2004-06-08 fl fixed encoding of non-ascii element/attribute names
# 2004-08-23 fl take advantage of post-2.1 expat features
# 2004-09-03 fl made Element class visible; removed factory
# 2005-02-01 fl added iterparse implementation
# 2005-03-02 fl fixed iterparse support for pre-2.2 versions
# 2005-11-12 fl added tostringlist/fromstringlist helpers
# 2006-07-05 fl merged in selected changes from the 1.3 sandbox
# 2006-07-05 fl removed support for 2.1 and earlier
# 2007-06-21 fl added deprecation/future warnings
# 2007-08-25 fl added doctype hook, added parser version attribute etc
# 2007-08-26 fl added new serializer code (better namespace handling, etc)
# 2007-08-27 fl warn for broken /tag searches on tree level
# 2007-09-02 fl added html/text methods to serializer (experimental)
# 2007-09-05 fl added method argument to tostring/tostringlist
# 2007-09-06 fl improved error handling
#
# Copyright (c) 1999-2007 by Fredrik Lundh. All rights reserved.
#
# [email protected]
# http://www.pythonware.com
#
# --------------------------------------------------------------------
# The ElementTree toolkit is
#
# Copyright (c) 1999-2007 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
from __future__ import generators
__all__ = [
# public symbols
"Comment",
"dump",
"Element", "ElementTree",
"fromstring", "fromstringlist",
"iselement", "iterparse",
"parse", "ParseError",
"PI", "ProcessingInstruction",
"QName",
"SubElement",
"tostring", "tostringlist",
"TreeBuilder",
"VERSION",
"XML",
"XMLParser", "XMLTreeBuilder",
]
##
# The <b>Element</b> type is a flexible container object, designed to
# store hierarchical data structures in memory. The type can be
# described as a cross between a list and a dictionary.
# <p>
# Each element has a number of properties associated with it:
# <ul>
# <li>a <i>tag</i>. This is a string identifying what kind of data
# this element represents (the element type, in other words).</li>
# <li>a number of <i>attributes</i>, stored in a Python dictionary.</li>
# <li>a <i>text</i> string.</li>
# <li>an optional <i>tail</i> string.</li>
# <li>a number of <i>child elements</i>, stored in a Python sequence</li>
# </ul>
#
# To create an element instance, use the {@link #Element} constructor
# or the {@link #SubElement} factory function.
# <p>
# The {@link #ElementTree} class can be used to wrap an element
# structure, and convert it from and to XML.
##
import sys, re
class _SimpleElementPath(object):
# emulate pre-1.2 find/findtext/findall behaviour
def find(self, element, tag):
for elem in element:
if elem.tag == tag:
return elem
return None
def findtext(self, element, tag, default=None):
for elem in element:
if elem.tag == tag:
return elem.text or ""
return default
def findall(self, element, tag):
if tag[:3] == ".//":
return element.getiterator(tag[3:])
result = []
for elem in element:
if elem.tag == tag:
result.append(elem)
return result
try:
import ElementPath
except ImportError:
# FIXME: issue warning in this case?
ElementPath = _SimpleElementPath()
VERSION = "1.3a2"
class ParseError(SyntaxError):
pass
# --------------------------------------------------------------------
##
# Checks if an object appears to be a valid element object.
#
# @param An element instance.
# @return A true value if this is an element object.
# @defreturn flag
def iselement(element):
# FIXME: not sure about this; might be a better idea to look
# for tag/attrib/text attributes
return isinstance(element, Element) or hasattr(element, "tag")
##
# Element class. This class defines the Element interface, and
# provides a reference implementation of this interface.
# <p>
# The element name, attribute names, and attribute values can be
# either 8-bit ASCII strings or Unicode strings.
#
# @param tag The element name.
# @param attrib An optional dictionary, containing element attributes.
# @param **extra Additional attributes, given as keyword arguments.
# @see Element
# @see SubElement
# @see Comment
# @see ProcessingInstruction
class Element(object):
# <tag attrib>text<child/>...</tag>tail
##
# (Attribute) Element tag.
tag = None
##
# (Attribute) Element attribute dictionary. Where possible, use
# {@link #Element.get},
# {@link #Element.set},
# {@link #Element.keys}, and
# {@link #Element.items} to access
# element attributes.
attrib = None
##
# (Attribute) Text before first subelement. This is either a
# string or the value None, if there was no text.
text = None
##
# (Attribute) Text after this element's end tag, but before the
# next sibling element's start tag. This is either a string or
# the value None, if there was no text.
tail = None # text after end tag, if any
def __init__(self, tag, attrib={}, **extra):
attrib = attrib.copy()
attrib.update(extra)
self.tag = tag
self.attrib = attrib
self._children = []
def __repr__(self):
return "<Element %s at %x>" % (repr(self.tag), id(self))
##
# Creates a new element object of the same type as this element.
#
# @param tag Element tag.
# @param attrib Element attributes, given as a dictionary.
# @return A new element instance.
def makeelement(self, tag, attrib):
return Element(tag, attrib)
##
# Returns the number of subelements.
#
# @return The number of subelements.
def __len__(self):
return len(self._children)
def __nonzero__(self):
import warnings
warnings.warn(
"The behavior of this method will change in future versions. "
"Use specific 'len(elem)' or 'elem is not None' test instead.",
FutureWarning
)
return len(self._children) != 0 # emulate old behaviour
##
# Returns the given subelement.
#
# @param index What subelement to return.
# @return The given subelement.
# @exception IndexError If the given element does not exist.
def __getitem__(self, index):
return self._children[index]
##
# Replaces the given subelement.
#
# @param index What subelement to replace.
# @param element The new element value.
# @exception IndexError If the given element does not exist.
# @exception AssertionError If element is not a valid object.
def __setitem__(self, index, element):
assert iselement(element)
self._children[index] = element
##
# Deletes the given subelement.
#
# @param index What subelement to delete.
# @exception IndexError If the given element does not exist.
def __delitem__(self, index):
del self._children[index]
##
# Returns a list containing subelements in the given range.
#
# @param start The first subelement to return.
# @param stop The first subelement that shouldn't be returned.
# @return A sequence object containing subelements.
def __getslice__(self, start, stop):
return self._children[start:stop]
##
# Replaces a number of subelements with elements from a sequence.
#
# @param start The first subelement to replace.
# @param stop The first subelement that shouldn't be replaced.
# @param elements A sequence object with zero or more elements.
# @exception AssertionError If a sequence member is not a valid object.
def __setslice__(self, start, stop, elements):
for element in elements:
assert iselement(element)
self._children[start:stop] = list(elements)
##
# Deletes a number of subelements.
#
# @param start The first subelement to delete.
# @param stop The first subelement to leave in there.
def __delslice__(self, start, stop):
del self._children[start:stop]
##
# Adds a subelement to the end of this element.
#
# @param element The element to add.
# @exception AssertionError If a sequence member is not a valid object.
def append(self, element):
assert iselement(element)
self._children.append(element)
##
# Appends subelements from a sequence.
#
# @param elements A sequence object with zero or more elements.
# @exception AssertionError If a subelement is not a valid object.
# @since 1.3
def extend(self, elements):
for element in elements:
assert iselement(element)
self._children.extend(elements)
##
# Inserts a subelement at the given position in this element.
#
# @param index Where to insert the new subelement.
# @exception AssertionError If the element is not a valid object.
def insert(self, index, element):
assert iselement(element)
self._children.insert(index, element)
##
# Removes a matching subelement. Unlike the <b>find</b> methods,
# this method compares elements based on identity, not on tag
# value or contents.
#
# @param element What element to remove.
# @exception ValueError If a matching element could not be found.
# @exception AssertionError If the element is not a valid object.
def remove(self, element):
assert iselement(element)
self._children.remove(element)
##
# (Deprecated) Returns all subelements. The elements are returned
# in document order.
#
# @return A list of subelements.
# @defreturn list of Element instances
def getchildren(self):
import warnings
warnings.warn(
"This method will be removed in future versions. "
"Use 'list(elem)' or iteration over elem instead.",
DeprecationWarning
)
return self._children
##
# Finds the first matching subelement, by tag name or path.
#
# @param path What element to look for.
# @return The first matching element, or None if no element was found.
# @defreturn Element or None
def find(self, path):
return ElementPath.find(self, path)
##
# Finds text for the first matching subelement, by tag name or path.
#
# @param path What element to look for.
# @param default What to return if the element was not found.
# @return The text content of the first matching element, or the
    # default value if no element was found. Note that if the element
    # is found, but has no text content, this method returns an
# empty string.
# @defreturn string
def findtext(self, path, default=None):
return ElementPath.findtext(self, path, default)
##
# Finds all matching subelements, by tag name or path.
#
# @param path What element to look for.
# @return A list or iterator containing all matching elements,
# in document order.
# @defreturn list of Element instances
def findall(self, path):
return ElementPath.findall(self, path)
##
# Resets an element. This function removes all subelements, clears
# all attributes, and sets the text and tail attributes to None.
def clear(self):
self.attrib.clear()
self._children = []
self.text = self.tail = None
##
# Gets an element attribute.
#
# @param key What attribute to look for.
# @param default What to return if the attribute was not found.
# @return The attribute value, or the default value, if the
# attribute was not found.
# @defreturn string or None
def get(self, key, default=None):
return self.attrib.get(key, default)
##
# Sets an element attribute.
#
# @param key What attribute to set.
# @param value The attribute value.
def set(self, key, value):
self.attrib[key] = value
##
# Gets a list of attribute names. The names are returned in an
# arbitrary order (just like for an ordinary Python dictionary).
#
# @return A list of element attribute names.
# @defreturn list of strings
def keys(self):
return self.attrib.keys()
##
# Gets element attributes, as a sequence. The attributes are
# returned in an arbitrary order.
#
# @return A list of (name, value) tuples for all attributes.
# @defreturn list of (string, string) tuples
def items(self):
return self.attrib.items()
##
# Creates a tree iterator. The iterator loops over this element
# and all subelements, in document order, and returns all elements
# with a matching tag.
# <p>
# If the tree structure is modified during iteration, new or removed
# elements may or may not be included. To get a stable set, use the
# list() function on the iterator, and loop over the resulting list.
#
# @param tag What tags to look for (default is to return all elements).
# @return An iterator containing all the matching elements.
# @defreturn iterator
def iter(self, tag=None):
if tag == "*":
tag = None
if tag is None or self.tag == tag:
yield self
for e in self._children:
for e in e.iter(tag):
yield e
# compatibility (FIXME: preserve list behaviour too? see below)
getiterator = iter
# def getiterator(self, tag=None):
# return list(tag)
##
# Creates a text iterator. The iterator loops over this element
# and all subelements, in document order, and returns all inner
# text.
#
# @return An iterator containing all inner text.
# @defreturn iterator
def itertext(self):
if self.text:
yield self.text
for e in self:
for s in e.itertext():
yield s
if e.tail:
yield e.tail
# compatibility
_Element = _ElementInterface = Element
##
# Subelement factory. This function creates an element instance, and
# appends it to an existing element.
# <p>
# The element name, attribute names, and attribute values can be
# either 8-bit ASCII strings or Unicode strings.
#
# @param parent The parent element.
# @param tag The subelement name.
# @param attrib An optional dictionary, containing element attributes.
# @param **extra Additional attributes, given as keyword arguments.
# @return An element instance.
# @defreturn Element
def SubElement(parent, tag, attrib={}, **extra):
attrib = attrib.copy()
attrib.update(extra)
element = parent.makeelement(tag, attrib)
parent.append(element)
return element
##
# Comment element factory. This factory function creates a special
# element that will be serialized as an XML comment by the standard
# serializer.
# <p>
# The comment string can be either an 8-bit ASCII string or a Unicode
# string.
#
# @param text A string containing the comment string.
# @return An element instance, representing a comment.
# @defreturn Element
def Comment(text=None):
element = Element(Comment)
element.text = text
return element
##
# PI element factory. This factory function creates a special element
# that will be serialized as an XML processing instruction by the standard
# serializer.
#
# @param target A string containing the PI target.
# @param text A string containing the PI contents, if any.
# @return An element instance, representing a PI.
# @defreturn Element
def ProcessingInstruction(target, text=None):
element = Element(ProcessingInstruction)
element.text = target
if text:
element.text = element.text + " " + text
return element
PI = ProcessingInstruction
##
# QName wrapper. This can be used to wrap a QName attribute value, in
# order to get proper namespace handling on output.
#
# @param text A string containing the QName value, in the form {uri}local,
# or, if the tag argument is given, the URI part of a QName.
# @param tag Optional tag. If given, the first argument is interpreted as
# an URI, and this argument is interpreted as a local name.
# @return An opaque object, representing the QName.
class QName(object):
def __init__(self, text_or_uri, tag=None):
if tag:
text_or_uri = "{%s}%s" % (text_or_uri, tag)
self.text = text_or_uri
def __str__(self):
return self.text
def __hash__(self):
return hash(self.text)
def __cmp__(self, other):
if isinstance(other, QName):
return cmp(self.text, other.text)
return cmp(self.text, other)
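# For instance (with a made-up namespace URI), QName("{http://example.org/ns}item")
# and QName("http://example.org/ns", "item") describe the same qualified name and
# serialize using whatever prefix ends up bound to that URI.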
# --------------------------------------------------------------------
##
# ElementTree wrapper class. This class represents an entire element
# hierarchy, and adds some extra support for serialization to and from
# standard XML.
#
# @param element Optional root element.
# @keyparam file Optional file handle or file name. If given, the
# tree is initialized with the contents of this XML file.
class ElementTree(object):
def __init__(self, element=None, file=None):
assert element is None or iselement(element)
self._root = element # first node
if file:
self.parse(file)
##
# Gets the root element for this tree.
#
# @return An element instance.
# @defreturn Element
def getroot(self):
return self._root
##
# Replaces the root element for this tree. This discards the
# current contents of the tree, and replaces it with the given
# element. Use with care.
#
# @param element An element instance.
def _setroot(self, element):
assert iselement(element)
self._root = element
##
# Loads an external XML document into this element tree.
#
# @param source A file name or file object.
# @keyparam parser An optional parser instance. If not given, the
# standard {@link XMLParser} parser is used.
# @return The document root element.
# @defreturn Element
def parse(self, source, parser=None):
if not hasattr(source, "read"):
source = open(source, "rb")
if not parser:
parser = XMLParser(target=TreeBuilder())
while 1:
data = source.read(32768)
if not data:
break
parser.feed(data)
self._root = parser.close()
return self._root
##
# Creates a tree iterator for the root element. The iterator loops
# over all elements in this tree, in document order.
#
# @param tag What tags to look for (default is to return all elements)
# @return An iterator.
# @defreturn iterator
def iter(self, tag=None):
assert self._root is not None
return self._root.iter(tag)
getiterator = iter
##
# Finds the first toplevel element with given tag.
# Same as getroot().find(path).
#
# @param path What element to look for.
# @return The first matching element, or None if no element was found.
# @defreturn Element or None
def find(self, path):
assert self._root is not None
if path[:1] == "/":
path = "." + path
import warnings
warnings.warn(
"This search is broken in 1.3 and earlier; if you rely "
"on the current behaviour, change it to %r" % path,
FutureWarning
)
return self._root.find(path)
##
# Finds the element text for the first toplevel element with given
# tag. Same as getroot().findtext(path).
#
# @param path What toplevel element to look for.
# @param default What to return if the element was not found.
# @return The text content of the first matching element, or the
    # default value if no element was found. Note that if the element
    # is found, but has no text content, this method returns an
# empty string.
# @defreturn string
def findtext(self, path, default=None):
assert self._root is not None
if path[:1] == "/":
path = "." + path
import warnings
warnings.warn(
"This search is broken in 1.3 and earlier; if you rely "
"on the current behaviour, change it to %r" % path,
FutureWarning
)
return self._root.findtext(path, default)
##
# Finds all toplevel elements with the given tag.
# Same as getroot().findall(path).
#
# @param path What element to look for.
# @return A list or iterator containing all matching elements,
# in document order.
# @defreturn list of Element instances
def findall(self, path):
assert self._root is not None
if path[:1] == "/":
path = "." + path
import warnings
warnings.warn(
"This search is broken in 1.3 and earlier; if you rely "
"on the current behaviour, change it to %r" % path,
FutureWarning
)
return self._root.findall(path)
##
# Writes the element tree to a file, as XML.
#
# @param file A file name, or a file object opened for writing.
# @keyparam encoding Optional output encoding (default is US-ASCII).
# @keyparam method Optional output method ("xml" or "html"; default
# is "xml".
# @keyparam xml_declaration Controls if an XML declaration should
# be added to the file. Use False for never, True for always,
# None for only if not US-ASCII or UTF-8. None is default.
def write(self, file,
# keyword arguments
encoding="us-ascii",
xml_declaration=None,
default_namespace=None,
method=None):
assert self._root is not None
if not hasattr(file, "write"):
file = open(file, "wb")
write = file.write
if not method:
method = "xml"
if not encoding:
encoding = "us-ascii"
elif xml_declaration or (xml_declaration is None and
encoding not in ("utf-8", "us-ascii")):
write("<?xml version='1.0' encoding='%s'?>\n" % encoding)
if method == "text":
_serialize_text(write, self._root, encoding)
else:
qnames, namespaces = _namespaces(
self._root, encoding, default_namespace
)
if method == "xml":
_serialize_xml(
write, self._root, encoding, qnames, namespaces
)
elif method == "html":
_serialize_html(
write, self._root, encoding, qnames, namespaces
)
else:
raise ValueError("unknown method %r" % method)
# --------------------------------------------------------------------
# serialization support
def _namespaces(elem, encoding, default_namespace=None):
# identify namespaces used in this tree
# maps qnames to *encoded* prefix:local names
qnames = {None: None}
# maps uri:s to prefixes
namespaces = {}
if default_namespace:
namespaces[default_namespace] = ""
def encode(text):
return text.encode(encoding)
def add_qname(qname):
# calculate serialized qname representation
try:
if qname[:1] == "{":
uri, tag = qname[1:].split("}", 1)
prefix = namespaces.get(uri)
if prefix is None:
prefix = _namespace_map.get(uri)
if prefix is None:
prefix = "ns%d" % len(namespaces)
if prefix != "xml":
namespaces[uri] = prefix
if prefix:
qnames[qname] = encode("%s:%s" % (prefix, tag))
else:
qnames[qname] = encode(tag) # default element
else:
if default_namespace:
# FIXME: can this be handled in XML 1.0?
raise ValueError(
"cannot use non-qualified names with "
"default_namespace option"
)
qnames[qname] = encode(qname)
except TypeError:
_raise_serialization_error(qname)
# populate qname and namespaces table
try:
iterate = elem.iter
except AttributeError:
iterate = elem.getiterator # cET compatibility
for elem in iterate():
tag = elem.tag
if isinstance(tag, QName) and tag.text not in qnames:
add_qname(tag.text)
elif isinstance(tag, basestring):
if tag not in qnames:
add_qname(tag)
elif tag is not None and tag is not Comment and tag is not PI:
_raise_serialization_error(tag)
for key, value in elem.items():
if isinstance(key, QName):
key = key.text
if key not in qnames:
add_qname(key)
if isinstance(value, QName) and value.text not in qnames:
add_qname(value.text)
text = elem.text
if isinstance(text, QName) and text.text not in qnames:
add_qname(text.text)
return qnames, namespaces
def _serialize_xml(write, elem, encoding, qnames, namespaces):
tag = elem.tag
text = elem.text
if tag is Comment:
write("<!--%s-->" % _escape_cdata(text, encoding))
elif tag is ProcessingInstruction:
write("<?%s?>" % _escape_cdata(text, encoding))
else:
tag = qnames[tag]
if tag is None:
if text:
write(_escape_cdata(text, encoding))
for e in elem:
_serialize_xml(write, e, encoding, qnames, None)
else:
write("<" + tag)
items = elem.items()
if items or namespaces:
items.sort() # lexical order
for k, v in items:
if isinstance(k, QName):
k = k.text
if isinstance(v, QName):
v = qnames[v.text]
else:
v = _escape_attrib(v, encoding)
write(" %s=\"%s\"" % (qnames[k], v))
if namespaces:
items = namespaces.items()
items.sort(key=lambda x: x[1]) # sort on prefix
for v, k in items:
if k:
k = ":" + k
write(" xmlns%s=\"%s\"" % (
k.encode(encoding),
_escape_attrib(v, encoding)
))
if text or len(elem):
write(">")
if text:
write(_escape_cdata(text, encoding))
for e in elem:
_serialize_xml(write, e, encoding, qnames, None)
write("</" + tag + ">")
else:
write(" />")
if elem.tail:
write(_escape_cdata(elem.tail, encoding))
HTML_EMPTY = ("area", "base", "basefont", "br", "col", "frame", "hr",
"img", "input", "isindex", "link", "meta" "param")
try:
HTML_EMPTY = set(HTML_EMPTY)
except NameError:
pass
def _serialize_html(write, elem, encoding, qnames, namespaces):
tag = elem.tag
text = elem.text
if tag is Comment:
write("<!--%s-->" % _escape_cdata(text, encoding))
elif tag is ProcessingInstruction:
write("<?%s?>" % _escape_cdata(text, encoding))
else:
tag = qnames[tag]
if tag is None:
if text:
write(_escape_cdata(text, encoding))
for e in elem:
_serialize_html(write, e, encoding, qnames, None)
else:
write("<" + tag)
items = elem.items()
if items or namespaces:
items.sort() # lexical order
for k, v in items:
if isinstance(k, QName):
k = k.text
if isinstance(v, QName):
v = qnames[v.text]
else:
v = _escape_attrib_html(v, encoding)
# FIXME: handle boolean attributes
write(" %s=\"%s\"" % (qnames[k], v))
if namespaces:
items = namespaces.items()
items.sort(key=lambda x: x[1]) # sort on prefix
for v, k in items:
if k:
k = ":" + k
write(" xmlns%s=\"%s\"" % (
k.encode(encoding),
_escape_attrib(v, encoding)
))
write(">")
tag = tag.lower()
if text:
if tag == "script" or tag == "style":
write(_encode(text, encoding))
else:
write(_escape_cdata(text, encoding))
for e in elem:
_serialize_html(write, e, encoding, qnames, None)
if tag not in HTML_EMPTY:
write("</" + tag + ">")
if elem.tail:
write(_escape_cdata(elem.tail, encoding))
def _serialize_text(write, elem, encoding):
for part in elem.itertext():
write(part.encode(encoding))
if elem.tail:
write(elem.tail.encode(encoding))
##
# Registers a namespace prefix. The registry is global, and any
# existing mapping for either the given prefix or the namespace URI
# will be removed.
#
# @param prefix Namespace prefix.
# @param uri Namespace uri. Tags and attributes in this namespace
# will be serialized with the given prefix, if at all possible.
# @raise ValueError If the prefix is reserved, or is otherwise
# invalid.
def register_namespace(prefix, uri):
if re.match("ns\d+$", prefix):
raise ValueError("Prefix format reserved for internal use")
for k, v in _namespace_map.items():
if k == uri or v == prefix:
del _namespace_map[k]
_namespace_map[uri] = prefix
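# e.g. register_namespace("ex", "http://example.org/ns")  # hypothetical URI
# makes later serialization use an "ex:" prefix for that namespace instead of
# a generated "ns0"-style prefix.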
_namespace_map = {
# "well-known" namespace prefixes
"http://www.w3.org/XML/1998/namespace": "xml",
"http://www.w3.org/1999/xhtml": "html",
"http://www.w3.org/1999/02/22-rdf-syntax-ns#": "rdf",
"http://schemas.xmlsoap.org/wsdl/": "wsdl",
# xml schema
"http://www.w3.org/2001/XMLSchema": "xs",
"http://www.w3.org/2001/XMLSchema-instance": "xsi",
    # dublin core
"http://purl.org/dc/elements/1.1/": "dc",
}
def _raise_serialization_error(text):
raise TypeError(
"cannot serialize %r (type %s)" % (text, type(text).__name__)
)
def _encode(text, encoding):
try:
return text.encode(encoding, "xmlcharrefreplace")
except (TypeError, AttributeError):
_raise_serialization_error(text)
def _escape_cdata(text, encoding):
# escape character data
try:
# it's worth avoiding do-nothing calls for strings that are
# shorter than 500 character, or so. assume that's, by far,
# the most common case in most applications.
if "&" in text:
text = text.replace("&", "&")
if "<" in text:
text = text.replace("<", "<")
if ">" in text:
text = text.replace(">", ">")
return text.encode(encoding, "xmlcharrefreplace")
except (TypeError, AttributeError):
_raise_serialization_error(text)
def _escape_attrib(text, encoding):
# escape attribute value
try:
if "&" in text:
text = text.replace("&", "&")
if "<" in text:
text = text.replace("<", "<")
if ">" in text:
text = text.replace(">", ">")
if "\"" in text:
text = text.replace("\"", """)
if "\n" in text:
text = text.replace("\n", " ")
return text.encode(encoding, "xmlcharrefreplace")
except (TypeError, AttributeError):
_raise_serialization_error(text)
def _escape_attrib_html(text, encoding):
# escape attribute value
try:
if "&" in text:
text = text.replace("&", "&")
if ">" in text:
text = text.replace(">", ">")
if "\"" in text:
text = text.replace("\"", """)
return text.encode(encoding, "xmlcharrefreplace")
except (TypeError, AttributeError):
_raise_serialization_error(text)
# --------------------------------------------------------------------
##
# Generates a string representation of an XML element, including all
# subelements.
#
# @param element An Element instance.
# @return An encoded string containing the XML data.
# @defreturn string
def tostring(element, encoding=None, method=None):
class dummy:
pass
data = []
file = dummy()
file.write = data.append
ElementTree(element).write(file, encoding, method=method)
return "".join(data)
##
# Generates a string representation of an XML element, including all
# subelements. The string is returned as a sequence of string fragments.
#
# @param element An Element instance.
# @return A sequence object containing the XML data.
# @defreturn sequence
# @since 1.3
def tostringlist(element, encoding=None):
class dummy:
pass
data = []
file = dummy()
file.write = data.append
ElementTree(element).write(file, encoding)
# FIXME: merge small fragments into larger parts
return data
##
# Writes an element tree or element structure to sys.stdout. This
# function should be used for debugging only.
# <p>
# The exact output format is implementation dependent. In this
# version, it's written as an ordinary XML file.
#
# @param elem An element tree or an individual element.
def dump(elem):
# debugging
if not isinstance(elem, ElementTree):
elem = ElementTree(elem)
elem.write(sys.stdout)
tail = elem.getroot().tail
if not tail or tail[-1] != "\n":
sys.stdout.write("\n")
# --------------------------------------------------------------------
# parsing
##
# Parses an XML document into an element tree.
#
# @param source A filename or file object containing XML data.
# @param parser An optional parser instance. If not given, the
# standard {@link XMLParser} parser is used.
# @return An ElementTree instance
def parse(source, parser=None):
tree = ElementTree()
tree.parse(source, parser)
return tree
##
# Parses an XML document into an element tree incrementally, and reports
# what's going on to the user.
#
# @param source A filename or file object containing XML data.
# @param events A list of events to report back. If omitted, only "end"
# events are reported.
# @param parser An optional parser instance. If not given, the
# standard {@link XMLParser} parser is used.
# @return A (event, elem) iterator.
def iterparse(source, events=None, parser=None):
if not hasattr(source, "read"):
source = open(source, "rb")
if not parser:
parser = XMLParser(target=TreeBuilder())
return _IterParseIterator(source, events, parser)
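# A typical incremental-parsing sketch (file name and tag are placeholders):
#   for event, elem in iterparse("data.xml", events=("start", "end")):
#       if event == "end" and elem.tag == "record":
#           ...          # handle the completed element, then free memory:
#           elem.clear()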
class _IterParseIterator(object):
def __init__(self, source, events, parser):
self._file = source
self._events = []
self._index = 0
self.root = self._root = None
self._parser = parser
# wire up the parser for event reporting
parser = self._parser._parser
append = self._events.append
if events is None:
events = ["end"]
for event in events:
if event == "start":
try:
parser.ordered_attributes = 1
parser.specified_attributes = 1
def handler(tag, attrib_in, event=event, append=append,
start=self._parser._start_list):
append((event, start(tag, attrib_in)))
parser.StartElementHandler = handler
except AttributeError:
def handler(tag, attrib_in, event=event, append=append,
start=self._parser._start):
append((event, start(tag, attrib_in)))
parser.StartElementHandler = handler
elif event == "end":
def handler(tag, event=event, append=append,
end=self._parser._end):
append((event, end(tag)))
parser.EndElementHandler = handler
elif event == "start-ns":
def handler(prefix, uri, event=event, append=append):
try:
uri = uri.encode("ascii")
except UnicodeError:
pass
append((event, (prefix or "", uri)))
parser.StartNamespaceDeclHandler = handler
elif event == "end-ns":
def handler(prefix, event=event, append=append):
append((event, None))
parser.EndNamespaceDeclHandler = handler
def next(self):
while 1:
try:
item = self._events[self._index]
except IndexError:
if self._parser is None:
self.root = self._root
raise StopIteration
# load event buffer
del self._events[:]
self._index = 0
data = self._file.read(16384)
if data:
self._parser.feed(data)
else:
self._root = self._parser.close()
self._parser = None
else:
self._index = self._index + 1
return item
def __iter__(self):
return self
##
# Parses an XML document from a string constant. This function can
# be used to embed "XML literals" in Python code.
#
# @param source A string containing XML data.
# @param parser An optional parser instance. If not given, the
# standard {@link XMLParser} parser is used.
# @return An Element instance.
# @defreturn Element
def XML(text, parser=None):
if not parser:
parser = XMLParser(target=TreeBuilder())
parser.feed(text)
return parser.close()
##
# Parses an XML document from a string constant, and also returns
# a dictionary which maps from element id:s to elements.
#
# @param source A string containing XML data.
# @param parser An optional parser instance. If not given, the
# standard {@link XMLParser} parser is used.
# @return A tuple containing an Element instance and a dictionary.
# @defreturn (Element, dictionary)
def XMLID(text, parser=None):
if not parser:
parser = XMLParser(target=TreeBuilder())
parser.feed(text)
tree = parser.close()
ids = {}
for elem in tree.getiterator():
id = elem.get("id")
if id:
ids[id] = elem
return tree, ids
##
# Parses an XML document from a string constant. Same as {@link #XML}.
#
# @def fromstring(text)
# @param source A string containing XML data.
# @return An Element instance.
# @defreturn Element
fromstring = XML
##
# Parses an XML document from a sequence of string fragments.
#
# @param sequence A list or other sequence containing XML data fragments.
# @param parser An optional parser instance. If not given, the
# standard {@link XMLParser} parser is used.
# @return An Element instance.
# @defreturn Element
# @since 1.3
def fromstringlist(sequence, parser=None):
if not parser:
parser = XMLParser(target=TreeBuilder())
for text in sequence:
parser.feed(text)
return parser.close()
# --------------------------------------------------------------------
##
# Generic element structure builder. This builder converts a sequence
# of {@link #TreeBuilder.start}, {@link #TreeBuilder.data}, and {@link
# #TreeBuilder.end} method calls to a well-formed element structure.
# <p>
# You can use this class to build an element structure using a custom XML
# parser, or a parser for some other XML-like format.
#
# @param element_factory Optional element factory. This factory
# is called to create new Element instances, as necessary.
class TreeBuilder(object):
def __init__(self, element_factory=None):
self._data = [] # data collector
self._elem = [] # element stack
self._last = None # last element
self._tail = None # true if we're after an end tag
if element_factory is None:
element_factory = Element
self._factory = element_factory
##
# Flushes the builder buffers, and returns the toplevel document
# element.
#
# @return An Element instance.
# @defreturn Element
def close(self):
assert len(self._elem) == 0, "missing end tags"
        assert self._last is not None, "missing toplevel element"
return self._last
def _flush(self):
if self._data:
if self._last is not None:
text = "".join(self._data)
if self._tail:
assert self._last.tail is None, "internal error (tail)"
self._last.tail = text
else:
assert self._last.text is None, "internal error (text)"
self._last.text = text
self._data = []
##
# Adds text to the current element.
#
# @param data A string. This should be either an 8-bit string
# containing ASCII text, or a Unicode string.
def data(self, data):
self._data.append(data)
##
# Opens a new element.
#
# @param tag The element name.
# @param attrib A dictionary containing element attributes.
# @return The opened element.
# @defreturn Element
def start(self, tag, attrs):
self._flush()
self._last = elem = self._factory(tag, attrs)
if self._elem:
self._elem[-1].append(elem)
self._elem.append(elem)
self._tail = 0
return elem
##
# Closes the current element.
#
# @param tag The element name.
# @return The closed element.
# @defreturn Element
def end(self, tag):
self._flush()
self._last = self._elem.pop()
assert self._last.tag == tag,\
"end tag mismatch (expected %s, got %s)" % (
self._last.tag, tag)
self._tail = 1
return self._last
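# Building a tree by hand with TreeBuilder (illustrative):
#   builder = TreeBuilder()
#   builder.start("root", {})
#   builder.data("payload")
#   builder.end("root")
#   elem = builder.close()   # elem.tag == "root", elem.text == "payload"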
##
# Element structure builder for XML source data, based on the
# <b>expat</b> parser.
#
# @keyparam target Target object. If omitted, the builder uses an
# instance of the standard {@link #TreeBuilder} class.
# @keyparam html Predefine HTML entities. This flag is not supported
# by the current implementation.
# @keyparam encoding Optional encoding. If given, the value overrides
# the encoding specified in the XML file.
# @see #ElementTree
# @see #TreeBuilder
class XMLParser(object):
def __init__(self, html=0, target=None, encoding=None):
try:
from xml.parsers import expat
except ImportError:
try:
import pyexpat; expat = pyexpat
except ImportError:
raise ImportError(
"No module named expat; use SimpleXMLTreeBuilder instead"
)
parser = expat.ParserCreate(encoding, "}")
if target is None:
target = TreeBuilder()
# underscored names are provided for compatibility only
self.parser = self._parser = parser
self.target = self._target = target
self._error = expat.error
self._names = {} # name memo cache
# callbacks
parser.DefaultHandlerExpand = self._default
parser.StartElementHandler = self._start
parser.EndElementHandler = self._end
parser.CharacterDataHandler = self._data
# let expat do the buffering, if supported
try:
self._parser.buffer_text = 1
except AttributeError:
pass
# use new-style attribute handling, if supported
try:
self._parser.ordered_attributes = 1
self._parser.specified_attributes = 1
parser.StartElementHandler = self._start_list
except AttributeError:
pass
self._doctype = None
self.entity = {}
try:
self.version = "Expat %d.%d.%d" % expat.version_info
except AttributeError:
pass # unknown
def _raiseerror(self, value):
err = ParseError(value)
err.code = value.code
err.position = value.lineno, value.offset
raise err
def _fixtext(self, text):
# convert text string to ascii, if possible
try:
return text.encode("ascii")
except UnicodeError:
return text
def _fixname(self, key):
# expand qname, and convert name string to ascii, if possible
try:
name = self._names[key]
except KeyError:
name = key
if "}" in name:
name = "{" + name
self._names[key] = name = self._fixtext(name)
return name
def _start(self, tag, attrib_in):
fixname = self._fixname
fixtext = self._fixtext
tag = fixname(tag)
attrib = {}
for key, value in attrib_in.items():
attrib[fixname(key)] = fixtext(value)
return self.target.start(tag, attrib)
def _start_list(self, tag, attrib_in):
fixname = self._fixname
fixtext = self._fixtext
tag = fixname(tag)
attrib = {}
if attrib_in:
for i in range(0, len(attrib_in), 2):
attrib[fixname(attrib_in[i])] = fixtext(attrib_in[i+1])
return self.target.start(tag, attrib)
def _data(self, text):
return self.target.data(self._fixtext(text))
def _end(self, tag):
return self.target.end(self._fixname(tag))
def _default(self, text):
prefix = text[:1]
if prefix == "&":
# deal with undefined entities
try:
self.target.data(self.entity[text[1:-1]])
except KeyError:
from xml.parsers import expat
err = expat.error(
"undefined entity %s: line %d, column %d" %
(text, self._parser.ErrorLineNumber,
self._parser.ErrorColumnNumber)
)
err.code = 11 # XML_ERROR_UNDEFINED_ENTITY
err.lineno = self._parser.ErrorLineNumber
err.offset = self._parser.ErrorColumnNumber
raise err
elif prefix == "<" and text[:9] == "<!DOCTYPE":
self._doctype = [] # inside a doctype declaration
elif self._doctype is not None:
# parse doctype contents
if prefix == ">":
self._doctype = None
return
text = text.strip()
if not text:
return
self._doctype.append(text)
n = len(self._doctype)
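            # once the declaration's name, PUBLIC/SYSTEM keyword and ids have
            # been collected, report the doctype to the target and stop collecting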
if n > 2:
type = self._doctype[1]
if type == "PUBLIC" and n == 4:
name, type, pubid, system = self._doctype
elif type == "SYSTEM" and n == 3:
name, type, system = self._doctype
pubid = None
else:
return
if pubid:
pubid = pubid[1:-1]
if hasattr(self.target, "doctype"):
self.target.doctype(name, pubid, system[1:-1])
self._doctype = None
##
# Feeds data to the parser.
#
# @param data Encoded data.
def feed(self, data):
try:
self._parser.Parse(data, 0)
except self._error, v:
self._raiseerror(v)
##
# Finishes feeding data to the parser.
#
# @return An element structure.
# @defreturn Element
def close(self):
try:
self._parser.Parse("", 1) # end of data
except self._error, v:
self._raiseerror(v)
tree = self.target.close()
del self.target, self._parser # get rid of circular references
return tree
# compatibility
XMLTreeBuilder = XMLParser
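# An illustrative sketch of incremental parsing with the class above, kept in
# comments; the XML content is an assumption:
#
#     parser = XMLParser(target=TreeBuilder())
#     parser.feed("<root><item>text")
#     parser.feed("</item></root>")
#     root = parser.close()
#     # root[0].tag == "item" and root[0].text == "text"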
| gpl-2.0 |
cecep-edu/edx-platform | openedx/core/djangoapps/bookmarks/tests/test_views.py | 7 | 20744 | """
Tests for bookmark views.
"""
import ddt
import json
from nose.plugins.attrib import attr
from unittest import skipUnless
import urllib
from django.conf import settings
from django.core.urlresolvers import reverse
from mock import patch
from rest_framework.test import APIClient
from xmodule.modulestore import ModuleStoreEnum
from .test_models import BookmarksTestsBase
from .test_api import BookmarkApiEventTestMixin
# pylint: disable=no-member
class BookmarksViewsTestsBase(BookmarksTestsBase, BookmarkApiEventTestMixin):
"""
Base class for bookmarks views tests.
"""
def setUp(self):
super(BookmarksViewsTestsBase, self).setUp()
self.anonymous_client = APIClient()
self.client = self.login_client(user=self.user)
def login_client(self, user):
"""
Helper method for getting the client and user and logging in. Returns client.
"""
client = APIClient()
client.login(username=user.username, password=self.TEST_PASSWORD)
return client
def send_get(self, client, url, query_parameters=None, expected_status=200):
"""
Helper method for sending a GET to the server. Verifies the expected status and returns the response.
"""
url = url + '?' + query_parameters if query_parameters else url
response = client.get(url)
self.assertEqual(expected_status, response.status_code)
return response
def send_post(self, client, url, data, content_type='application/json', expected_status=201):
"""
Helper method for sending a POST to the server. Verifies the expected status and returns the response.
"""
response = client.post(url, data=json.dumps(data), content_type=content_type)
self.assertEqual(expected_status, response.status_code)
return response
def send_delete(self, client, url, expected_status=204):
"""
Helper method for sending a DELETE to the server. Verifies the expected status and returns the response.
"""
response = client.delete(url)
self.assertEqual(expected_status, response.status_code)
return response
@attr('shard_2')
@ddt.ddt
@skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Tests only valid in LMS')
class BookmarksListViewTests(BookmarksViewsTestsBase):
"""
This contains the tests for GET & POST methods of bookmark.views.BookmarksListView class
GET /api/bookmarks/v1/bookmarks/?course_id={course_id1}
POST /api/bookmarks/v1/bookmarks
"""
@ddt.data(
(1, False),
(10, False),
(25, False),
(1, True),
(10, True),
(25, True),
)
@ddt.unpack
@patch('eventtracking.tracker.emit')
def test_get_bookmarks_successfully(self, bookmarks_count, check_all_fields, mock_tracker):
"""
        Test that requesting bookmarks for a course returns records successfully in
        the expected order, both with and without the optional fields.
"""
course, __, bookmarks = self.create_course_with_bookmarks_count(
bookmarks_count, store_type=ModuleStoreEnum.Type.mongo
)
query_parameters = 'course_id={}&page_size={}'.format(urllib.quote(unicode(course.id)), 100)
if check_all_fields:
query_parameters += '&fields=path,display_name'
response = self.send_get(
client=self.client,
url=reverse('bookmarks'),
query_parameters=query_parameters,
)
bookmarks_data = response.data['results']
self.assertEqual(len(bookmarks_data), len(bookmarks))
self.assertEqual(response.data['count'], len(bookmarks))
self.assertEqual(response.data['num_pages'], 1)
# As bookmarks are sorted by -created so we will compare in that order.
self.assert_bookmark_data_is_valid(bookmarks[-1], bookmarks_data[0], check_optional_fields=check_all_fields)
self.assert_bookmark_data_is_valid(bookmarks[0], bookmarks_data[-1], check_optional_fields=check_all_fields)
self.assert_bookmark_event_emitted(
mock_tracker,
event_name='edx.bookmark.listed',
course_id=unicode(course.id),
list_type='per_course',
bookmarks_count=bookmarks_count,
page_size=100,
page_number=1
)
@ddt.data(
10, 25
)
@patch('eventtracking.tracker.emit')
def test_get_bookmarks_with_pagination(self, bookmarks_count, mock_tracker):
"""
        Test that requesting bookmarks for a course returns paginated results with a 200 code.
"""
course, __, bookmarks = self.create_course_with_bookmarks_count(
bookmarks_count, store_type=ModuleStoreEnum.Type.mongo
)
page_size = 5
query_parameters = 'course_id={}&page_size={}'.format(urllib.quote(unicode(course.id)), page_size)
response = self.send_get(
client=self.client,
url=reverse('bookmarks'),
query_parameters=query_parameters
)
bookmarks_data = response.data['results']
# Pagination assertions.
self.assertEqual(response.data['count'], bookmarks_count)
self.assertIn('page=2&page_size={}'.format(page_size), response.data['next'])
self.assertEqual(response.data['num_pages'], bookmarks_count / page_size)
self.assertEqual(len(bookmarks_data), min(bookmarks_count, page_size))
self.assert_bookmark_data_is_valid(bookmarks[-1], bookmarks_data[0])
self.assert_bookmark_event_emitted(
mock_tracker,
event_name='edx.bookmark.listed',
course_id=unicode(course.id),
list_type='per_course',
bookmarks_count=bookmarks_count,
page_size=page_size,
page_number=1
)
@patch('eventtracking.tracker.emit')
def test_get_bookmarks_with_invalid_data(self, mock_tracker):
"""
Test that requesting bookmarks with invalid data returns 0 records.
"""
# Invalid course id.
response = self.send_get(
client=self.client,
url=reverse('bookmarks'),
query_parameters='course_id=invalid'
)
bookmarks_data = response.data['results']
self.assertEqual(len(bookmarks_data), 0)
self.assertFalse(mock_tracker.emit.called) # pylint: disable=maybe-no-member
@patch('eventtracking.tracker.emit')
def test_get_all_bookmarks_when_course_id_not_given(self, mock_tracker):
"""
Test that requesting bookmarks returns all records for that user.
"""
# Without course id we would return all the bookmarks for that user.
response = self.send_get(
client=self.client,
url=reverse('bookmarks')
)
bookmarks_data = response.data['results']
self.assertEqual(len(bookmarks_data), 3)
self.assert_bookmark_data_is_valid(self.other_bookmark_1, bookmarks_data[0])
self.assert_bookmark_data_is_valid(self.bookmark_2, bookmarks_data[1])
self.assert_bookmark_data_is_valid(self.bookmark_1, bookmarks_data[2])
self.assert_bookmark_event_emitted(
mock_tracker,
event_name='edx.bookmark.listed',
list_type='all_courses',
bookmarks_count=3,
page_size=10,
page_number=1
)
def test_anonymous_access(self):
"""
Test that an anonymous client (not logged in) cannot call GET or POST.
"""
query_parameters = 'course_id={}'.format(self.course_id)
self.send_get(
client=self.anonymous_client,
url=reverse('bookmarks'),
query_parameters=query_parameters,
expected_status=401
)
self.send_post(
client=self.anonymous_client,
url=reverse('bookmarks'),
data={'usage_id': 'test'},
expected_status=401
)
def test_post_bookmark_successfully(self):
"""
        Test that posting a bookmark successfully returns the newly created data with a 201 code.
"""
response = self.send_post(
client=self.client,
url=reverse('bookmarks'),
data={'usage_id': unicode(self.vertical_3.location)}
)
# Assert Newly created bookmark.
self.assertEqual(response.data['id'], '%s,%s' % (self.user.username, unicode(self.vertical_3.location)))
self.assertEqual(response.data['course_id'], self.course_id)
self.assertEqual(response.data['usage_id'], unicode(self.vertical_3.location))
self.assertIsNotNone(response.data['created'])
self.assertEqual(len(response.data['path']), 2)
self.assertEqual(response.data['display_name'], self.vertical_3.display_name)
def test_post_bookmark_with_invalid_data(self):
"""
Test that posting a bookmark for a block with invalid usage id returns a 400.
Scenarios:
1) Invalid usage id.
2) Without usage id.
3) With empty request.data
"""
# Send usage_id with invalid format.
response = self.send_post(
client=self.client,
url=reverse('bookmarks'),
data={'usage_id': 'invalid'},
expected_status=400
)
self.assertEqual(response.data['user_message'], u'An error has occurred. Please try again.')
# Send data without usage_id.
response = self.send_post(
client=self.client,
url=reverse('bookmarks'),
data={'course_id': 'invalid'},
expected_status=400
)
self.assertEqual(response.data['user_message'], u'An error has occurred. Please try again.')
self.assertEqual(response.data['developer_message'], u'Parameter usage_id not provided.')
# Send empty data dictionary.
with self.assertNumQueries(8): # No queries for bookmark table.
response = self.send_post(
client=self.client,
url=reverse('bookmarks'),
data={},
expected_status=400
)
self.assertEqual(response.data['user_message'], u'An error has occurred. Please try again.')
self.assertEqual(response.data['developer_message'], u'No data provided.')
def test_post_bookmark_for_non_existing_block(self):
"""
Test that posting a bookmark for a block that does not exist returns a 400.
"""
response = self.send_post(
client=self.client,
url=reverse('bookmarks'),
data={'usage_id': 'i4x://arbi/100/html/340ef1771a094090ad260ec940d04a21'},
expected_status=400
)
self.assertEqual(
response.data['user_message'],
u'An error has occurred. Please try again.'
)
self.assertEqual(
response.data['developer_message'],
u'Block with usage_id: i4x://arbi/100/html/340ef1771a094090ad260ec940d04a21 not found.'
)
@patch('django.conf.settings.MAX_BOOKMARKS_PER_COURSE', 5)
def test_post_bookmark_when_max_bookmarks_already_exist(self):
"""
        Test that posting a bookmark when the maximum number of bookmarks already exists returns a 400.
"""
max_bookmarks = settings.MAX_BOOKMARKS_PER_COURSE
__, blocks, __ = self.create_course_with_bookmarks_count(max_bookmarks)
response = self.send_post(
client=self.client,
url=reverse('bookmarks'),
data={'usage_id': unicode(blocks[-1].location)},
expected_status=400
)
self.assertEqual(
response.data['user_message'],
u'You can create up to {0} bookmarks.'
u' You must remove some bookmarks before you can add new ones.'.format(max_bookmarks)
)
self.assertEqual(
response.data['developer_message'],
u'You can create up to {0} bookmarks.'
u' You must remove some bookmarks before you can add new ones.'.format(max_bookmarks)
)
def test_unsupported_methods(self):
"""
Test that DELETE and PUT are not supported.
"""
self.client.login(username=self.user.username, password=self.TEST_PASSWORD)
self.assertEqual(405, self.client.put(reverse('bookmarks')).status_code)
self.assertEqual(405, self.client.delete(reverse('bookmarks')).status_code)
@patch('eventtracking.tracker.emit')
@ddt.unpack
@ddt.data(
{'page_size': -1, 'expected_bookmarks_count': 2, 'expected_page_size': 10, 'expected_page_number': 1},
{'page_size': 0, 'expected_bookmarks_count': 2, 'expected_page_size': 10, 'expected_page_number': 1},
{'page_size': 999, 'expected_bookmarks_count': 2, 'expected_page_size': 100, 'expected_page_number': 1}
)
def test_listed_event_for_different_page_size_values(self, mock_tracker, page_size, expected_bookmarks_count,
expected_page_size, expected_page_number):
""" Test that edx.course.bookmark.listed event values are as expected for different page size values """
query_parameters = 'course_id={}&page_size={}'.format(urllib.quote(self.course_id), page_size)
self.send_get(client=self.client, url=reverse('bookmarks'), query_parameters=query_parameters)
self.assert_bookmark_event_emitted(
mock_tracker,
event_name='edx.bookmark.listed',
course_id=self.course_id,
list_type='per_course',
bookmarks_count=expected_bookmarks_count,
page_size=expected_page_size,
page_number=expected_page_number
)
@patch('openedx.core.djangoapps.bookmarks.views.eventtracking.tracker.emit')
def test_listed_event_for_page_number(self, mock_tracker):
""" Test that edx.course.bookmark.listed event values are as expected when we request a specific page number """
self.send_get(client=self.client, url=reverse('bookmarks'), query_parameters='page_size=2&page=2')
self.assert_bookmark_event_emitted(
mock_tracker,
event_name='edx.bookmark.listed',
list_type='all_courses',
bookmarks_count=3,
page_size=2,
page_number=2
)
@attr('shard_2')
@ddt.ddt
@skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Tests only valid in LMS')
class BookmarksDetailViewTests(BookmarksViewsTestsBase):
"""
This contains the tests for GET & DELETE methods of bookmark.views.BookmarksDetailView class
"""
@ddt.data(
('', False),
('fields=path,display_name', True)
)
@ddt.unpack
def test_get_bookmark_successfully(self, query_params, check_optional_fields):
"""
        Test that requesting a bookmark returns data with a 200 code.
"""
response = self.send_get(
client=self.client,
url=reverse(
'bookmarks_detail',
kwargs={'username': self.user.username, 'usage_id': unicode(self.sequential_1.location)}
),
query_parameters=query_params
)
data = response.data
self.assertIsNotNone(data)
self.assert_bookmark_data_is_valid(self.bookmark_1, data, check_optional_fields=check_optional_fields)
def test_get_bookmark_that_belongs_to_other_user(self):
"""
        Test that requesting a bookmark that belongs to another user returns a 404 status code.
"""
self.send_get(
client=self.client,
url=reverse(
'bookmarks_detail',
kwargs={'username': 'other', 'usage_id': unicode(self.vertical_1.location)}
),
expected_status=404
)
def test_get_bookmark_that_does_not_exist(self):
"""
        Test that requesting a bookmark that does not exist returns a 404 status code.
"""
response = self.send_get(
client=self.client,
url=reverse(
'bookmarks_detail',
kwargs={'username': self.user.username, 'usage_id': 'i4x://arbi/100/html/340ef1771a0940'}
),
expected_status=404
)
self.assertEqual(
response.data['user_message'],
'Bookmark with usage_id: i4x://arbi/100/html/340ef1771a0940 does not exist.'
)
self.assertEqual(
response.data['developer_message'],
'Bookmark with usage_id: i4x://arbi/100/html/340ef1771a0940 does not exist.'
)
def test_get_bookmark_with_invalid_usage_id(self):
"""
        Test that requesting a bookmark with an invalid usage id returns a 404.
"""
response = self.send_get(
client=self.client,
url=reverse(
'bookmarks_detail',
kwargs={'username': self.user.username, 'usage_id': 'i4x'}
),
expected_status=404
)
self.assertEqual(response.data['user_message'], u'Invalid usage_id: i4x.')
def test_anonymous_access(self):
"""
Test that an anonymous client (not logged in) cannot call GET or DELETE.
"""
url = reverse('bookmarks_detail', kwargs={'username': self.user.username, 'usage_id': 'i4x'})
self.send_get(
client=self.anonymous_client,
url=url,
expected_status=401
)
self.send_delete(
client=self.anonymous_client,
url=url,
expected_status=401
)
def test_delete_bookmark_successfully(self):
"""
        Test that deleting a bookmark returns a 204 status code on success.
"""
query_parameters = 'course_id={}'.format(urllib.quote(self.course_id))
response = self.send_get(client=self.client, url=reverse('bookmarks'), query_parameters=query_parameters)
bookmarks_data = response.data['results']
self.assertEqual(len(bookmarks_data), 2)
self.send_delete(
client=self.client,
url=reverse(
'bookmarks_detail',
kwargs={'username': self.user.username, 'usage_id': unicode(self.sequential_1.location)}
)
)
response = self.send_get(client=self.client, url=reverse('bookmarks'), query_parameters=query_parameters)
bookmarks_data = response.data['results']
self.assertEqual(len(bookmarks_data), 1)
def test_delete_bookmark_that_belongs_to_other_user(self):
"""
        Test that deleting a bookmark that belongs to another user returns a 404.
"""
self.send_delete(
client=self.client,
url=reverse(
'bookmarks_detail',
kwargs={'username': 'other', 'usage_id': unicode(self.vertical_1.location)}
),
expected_status=404
)
def test_delete_bookmark_that_does_not_exist(self):
"""
        Test that deleting a bookmark that does not exist returns a 404.
"""
response = self.send_delete(
client=self.client,
url=reverse(
'bookmarks_detail',
kwargs={'username': self.user.username, 'usage_id': 'i4x://arbi/100/html/340ef1771a0940'}
),
expected_status=404
)
self.assertEqual(
response.data['user_message'],
u'Bookmark with usage_id: i4x://arbi/100/html/340ef1771a0940 does not exist.'
)
self.assertEqual(
response.data['developer_message'],
'Bookmark with usage_id: i4x://arbi/100/html/340ef1771a0940 does not exist.'
)
def test_delete_bookmark_with_invalid_usage_id(self):
"""
        Test that deleting a bookmark with an invalid usage id returns a 404.
"""
response = self.send_delete(
client=self.client,
url=reverse(
'bookmarks_detail',
kwargs={'username': self.user.username, 'usage_id': 'i4x'}
),
expected_status=404
)
self.assertEqual(response.data['user_message'], u'Invalid usage_id: i4x.')
def test_unsupported_methods(self):
"""
Test that POST and PUT are not supported.
"""
url = reverse('bookmarks_detail', kwargs={'username': self.user.username, 'usage_id': 'i4x'})
self.client.login(username=self.user.username, password=self.TEST_PASSWORD)
self.assertEqual(405, self.client.put(url).status_code)
self.assertEqual(405, self.client.post(url).status_code)
| agpl-3.0 |
maellak/invenio | modules/miscutil/lib/upgrades/invenio_2012_12_05_oaiHARVEST_arguments_blob.py | 19 | 2408 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2012 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
from invenio.dbquery import run_sql, \
serialize_via_marshal, \
deserialize_via_marshal, \
OperationalError
depends_on = ['invenio_release_1_1_0']
def info():
return "Change of oaiHARVEST.arguments storage to blob and add DEFAULT NOT NULL to bibconvertcfgfile"
def do_upgrade():
create_statement = run_sql('SHOW CREATE TABLE oaiHARVEST')[0][1]
if '`arguments` text' in create_statement:
run_sql("ALTER TABLE oaiHARVEST CHANGE arguments arguments blob")
# translate old values
if '`bibconvertcfgfile`' in create_statement:
rows_to_change = run_sql("SELECT id, bibconvertcfgfile, bibfilterprogram, arguments FROM oaiHARVEST", with_dict=True)
# Move away from old columns
for row in rows_to_change:
if row['arguments']:
arguments = deserialize_via_marshal(row['arguments'])
else:
arguments = {}
arguments['c_cfg-file'] = row['bibconvertcfgfile']
arguments['f_filter-file'] = row['bibfilterprogram']
run_sql("UPDATE oaiHARVEST set arguments=%s WHERE id=%s", (serialize_via_marshal(arguments), row['id']))
run_sql("ALTER TABLE oaiHARVEST DROP COLUMN bibconvertcfgfile")
run_sql("ALTER TABLE oaiHARVEST DROP COLUMN bibfilterprogram")
def estimate():
""" Estimate running time of upgrade in seconds (optional). """
count_rows = run_sql("SELECT COUNT(*) FROM oaiHARVEST")[0][0]
return count_rows / 20
def pre_upgrade():
pass
def post_upgrade():
pass
| gpl-2.0 |
TvBMcMaster/pymeasure | pymeasure/instruments/thorlabs/thorlabspm100usb.py | 2 | 4089 | #
# This file is part of the PyMeasure package.
#
# Copyright (c) 2013-2017 PyMeasure Developers
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import logging
log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())
from pymeasure.instruments import Instrument, RangeException
class ThorlabsPM100USB(Instrument):
"""Represents Thorlabs PM100USB powermeter"""
# TODO: refactor to check if the sensor wavelength is adjustable
wavelength = Instrument.control("SENSE:CORR:WAV?", "SENSE:CORR:WAV %g",
"Wavelength in nm; not set outside of range")
# TODO: refactor to check if the sensor is a power sensor
power = Instrument.measurement("MEAS:POW?", "Power, in Watts")
wavelength_min = Instrument.measurement("SENS:CORR:WAV? MIN", "Get minimum wavelength, in nm")
wavelength_max = Instrument.measurement("SENS:CORR:WAV? MAX", "Get maximum wavelength, in nm")
def __init__(self, adapter, **kwargs):
super(ThorlabsPM100USB, self).__init__(
adapter, "ThorlabsPM100USB powermeter", **kwargs)
        self.timeout = 3000
self.sensor()
def measure_power(self, wavelength):
"""Set wavelength in nm and get power in W
        If wavelength is out of range a RangeException is raised"""
if wavelength < self.wavelength_min:
raise RangeException("Wavelength %.2f nm out of range: using minimum wavelength: %.2f nm" % (
wavelength, self.wavelength_min))
            # explicitly setting wavelength, although it would be automatically set
wavelength = self.wavelength_min
if wavelength > self.wavelength_max:
raise RangeException("Wavelength %.2f nm out of range: using maximum wavelength: %.2f nm" % (
wavelength, self.wavelength_max))
wavelength = self.wavelength_max
self.wavelength = wavelength
return self.power
def sensor(self):
"Get sensor info"
response = self.ask("SYST:SENSOR:IDN?").split(',')
self.sensor_name = response[0]
self.sensor_sn = response[1]
self.sensor_cal_msg = response[2]
self.sensor_type = response[3]
self.sensor_subtype = response[4]
self._flags_str = response[-1][:-1]
# interpretation of the flags
        # rough trick using bin repr, maybe something more elegant exists
# (bitshift, bitarray?)
self._flags = tuple(
map(lambda x: x == '1', bin(int(self._flags_str))[2:]))
# setting the flags; _dn are empty
self.is_power, self.is_energy, _d4, _d8, \
self.resp_settable, self.wavelength_settable, self.tau_settable, _d128, self.temperature_sens = self._flags
@property
def energy(self):
if self.is_energy:
return self.values("MEAS:ENER?")
else:
raise Exception("%s is not an energy sensor" % self.sensor_name)
return 0
@energy.setter
def energy(self, val):
raise Exception("Energy not settable!")
| mit |
antoniotre86/IERCT | ierct/baselines/metamap.py | 1 | 1902 | '''
Created on 29 Dec 2014
@author: Antonio
'''
from py4j.java_gateway import JavaGateway
from py4j.protocol import Py4JNetworkError
import re
import os
class Metamap:
'''
    Wrapper around a MetaMap service exposed through a py4j Java gateway:
    submits files or text for concept mapping and parses the returned results.
'''
def __init__(self):
'''
Constructor
'''
self.genericObject = self._start_gateway()
self.semmap = self._load_sem_map()
def _start_gateway(self):
self._gateway = JavaGateway()
return self._gateway.entry_point.getGenericObject()
def _submit(self):
try:
return self.genericObject.handleSubmission()
except Py4JNetworkError as e:
raise e
def _load_sem_map(self):
sm = {}
with open(r"C:\Users\Antonio\workspace\metamap\SemanticTypes_2013AA.txt", 'rb') as foo:
for line in foo.readlines():
lline = line.strip().split('|')
sm[lline[0]] = lline[2]
return sm
def process_file(self, filename):
self.genericObject.setFileField("UpLoad_File", os.path.abspath(filename))
out = self._submit()
return out
def process_text(self, text):
'''
NOT WORKING
:param text:
'''
self.genericObject.setField("APIText", text)
out = self._submit()
return out
def parse_result(self, result):
pt0 = '[0-9]+\s+(.+\[.+\])'
pt1 = '\[(.+)\]'
pt2 = '\(.+\)'
d = {}
for i in re.findall(pt0, result):
j = re.sub(pt2,'',i)
t = re.sub(pt1,'',j).lower()
t = re.sub('\s+$','',t)
c = re.findall(pt1,j)[0]
if not d.has_key(t):
d[t] = c
return d
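if __name__ == '__main__':
    # Minimal usage sketch only: it assumes the py4j gateway server wrapping
    # MetaMap is already running and that 'abstract.txt' is a stand-in for a
    # real input file.
    mm = Metamap()
    raw_result = mm.process_file('abstract.txt')
    concepts = mm.parse_result(raw_result)  # {term: semantic type}
    for term in concepts:
        print term, '->', concepts[term]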
| gpl-2.0 |
zxc2694/ov_test | program/pythonGUI/gui2.py | 3 | 2440 | ################################################################################
# File name: gui2.py
#
# Function: Display attitude data from the stm32f4 using Python (matplotlib)
#           The data plotted are the roll and pitch angles of the quadcopter attitude.
#
# Reference:http://electronut.in/plotting-real-time-data-from-arduino-using-python/
#
################################################################################
import sys, serial
import numpy as np
from time import sleep
from collections import deque
from matplotlib import pyplot as plt
# class that holds analog data for N samples
class AnalogData:
# constr
def __init__(self, maxLen):
self.ax = deque([0.0]*maxLen)
self.ay = deque([0.0]*maxLen)
self.maxLen = maxLen
# ring buffer
def addToBuf(self, buf, val):
if len(buf) < self.maxLen:
buf.append(val)
else:
buf.pop()
buf.appendleft(val)
#Add new data
def add(self, data):
assert(len(data) == 2)
self.addToBuf(self.ax, data[0])
self.addToBuf(self.ay, data[1])
# plot class
class AnalogPlot:
# constr
def __init__(self, analogData):
# set plot to animated
plt.ion()
plt.figure(figsize=(9,8))
self.axline, = plt.plot(analogData.ax,label="Roll",color="red")
self.ayline, = plt.plot(analogData.ay,label="Pitch",color="blue")
plt.xlabel("Time")
plt.ylabel("Angle(-90~+90)")
plt.title("Quadcopter attitude")
plt.legend() #Show label figure.
plt.ylim([-90, 90]) # Vertical axis scale.
plt.grid()
# update plot
def update(self, analogData):
self.axline.set_ydata(analogData.ax)
self.ayline.set_ydata(analogData.ay)
plt.draw()
def main():
# expects 1 arg - serial port string
if(len(sys.argv) != 2):
print "Type:"
print "sudo chmod 777 /dev/ttyUSB0"
print "python gui2.py '/dev/ttyUSB0'"
exit(1)
#strPort = '/dev/tty.usbserial-A7006Yqh'
strPort = sys.argv[1];
# plot parameters
analogData = AnalogData(200) # Horizontal axis scale.
analogPlot = AnalogPlot(analogData)
print "plotting data..."
a = 1
# open serial port
ser = serial.Serial(strPort, 9600)
while True:
try:
line = ser.readline()
data = [float(val) for val in line.split()]
if (a < 10):
a = a + 1
else:
print data[0] , data[1]
if(len(data) == 2):
analogData.add(data)
analogPlot.update(analogData)
except KeyboardInterrupt:
print "exiting"
break
# close serial
ser.flush()
ser.close()
# call main
if __name__ == '__main__':
main()
| mit |
ddico/server-tools | base_module_doc_rst/wizard/__init__.py | 23 | 1046 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import tech_rst_guide
from . import generate_relation_graph
| agpl-3.0 |
appi147/Jarvis | jarviscli/plugins/voice_control.py | 2 | 2629 | import os
from plugin import plugin, require
voice_control_installed = True
try:
import speech_recognition as sr
import pyaudio
except ImportError:
voice_control_installed = False
if voice_control_installed:
requirements = []
else:
requirements = [
'voice_control_requirements (install portaudio + re-run setup.sh)']
@require(native=requirements)
@plugin("hear")
def hear(jarvis, s):
    r = sr.Recognizer() # initializing the speech recognizer
    listen = False
    _jarvis = jarvis._jarvis # access the underlying Jarvis object
_jarvis.speech.text_to_speech("Say listen to start voice mode")
while listen is False:
try:
with sr.Microphone() as source:
os.system('reset') # for clearing the terminal.
print("Say listen to start listening")
                r.adjust_for_ambient_noise(source) # Eliminating the noise.
audio = r.listen(source) # Storing audio.
pinger = r.recognize_google(audio) # Converting speech to text
try:
if (pinger.lower() == "listen"):
listen = True
_jarvis.speech.text_to_speech("Voice mode activated")
print("Voice mode activated. Say something!")
break
else:
continue
except LookupError:
                    continue # Ignore it if you are not speaking.
except sr.UnknownValueError:
            continue # Ignore unrecognized-word errors.
while listen is True:
print("Say somthing")
try:
with sr.Microphone() as source:
r.adjust_for_ambient_noise(source)
audio = r.listen(source)
pinger = r.recognize_google(audio).lower()
if (pinger == "stop"):
listen = False
print("Listening stopped.")
_jarvis.speech.text_to_speech("Listening stopped.")
break
else:
print(pinger)
if listen:
line = pinger
jarvis.eval(line)
except LookupError:
_jarvis.speech.text_to_speech('Audio cannot be read!')
print("Could not understand audio")
_jarvis.speech.text_to_speech("unable to recognize voice")
except sr.UnknownValueError:
continue
except sr.RequestError:
print("Could not request results from Google Recognition service")
            continue # Ignore server connection errors.
| mit |
eugena/django-mere-feedback | setup.py | 1 | 1589 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import feedback
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
version = feedback.__version__
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
os.system('python setup.py bdist_wheel upload')
sys.exit()
if sys.argv[-1] == 'tag':
print("Tagging the version on github:")
os.system("git tag -a %s -m 'version %s'" % (version, version))
os.system("git push --tags")
sys.exit()
readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
setup(
name='django-mere-feedback',
version=version,
description="""Django feedback""",
long_description=readme + '\n\n' + history,
author='Eugena Mihailikova',
author_email='[email protected]',
url='https://github.com/eugena/django-mere-feedback',
packages=[
'feedback',
],
include_package_data=True,
install_requires=[
],
license="BSD",
zip_safe=False,
keywords='django feedback form',
classifiers=[
'Development Status :: 3 - Alpha',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
)
| bsd-3-clause |
john5223/airflow | docs/conf.py | 36 | 8474 | # -*- coding: utf-8 -*-
#
# Airflow documentation build configuration file, created by
# sphinx-quickstart on Thu Oct 9 20:50:01 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
from airflow import settings
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, settings.AIRFLOW_HOME + "/src/airflow")
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
'sphinxarg.ext',
]
viewcode_import = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Airflow'
copyright = u'2014, Maxime Beauchemin'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0.0'
# The full version, including alpha/beta/rc tags.
release = '1.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
import sphinx_rtd_theme
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Airflowdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'Airflow.tex', u'Airflow Documentation',
u'Maxime Beauchemin', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'airflow', u'Airflow Documentation',
[u'Maxime Beauchemin'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [(
'index', 'Airflow', u'Airflow Documentation',
u'Maxime Beauchemin', 'Airflow',
'Airflow is a system to programmaticaly author, schedule and monitor data pipelines.',
'Miscellaneous'
),]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| apache-2.0 |
CHrycyna/LandscapeTracker | tests/test_user.py | 1 | 1877 | from test_helper import TestHelper
from app import app, db
from app.models.user import User
class TestUser(TestHelper):
def test_retrieve_user(self):
u = User(username = 'andy', password = "123", email = '[email protected]')
db.session.add(u)
db.session.commit()
user = User.find_by_username('andy')
assert user.username == 'andy'
def test_find_by_id(self):
u = User(username = 'jack_nicholson', password = "123", email =
'[email protected]')
User.save_to_db(u)
user = User.find_by_id(1)
assert user != None
def test_find_all(self):
u = User(username = 'jack_nicholson', password = "123", email =
'[email protected]')
User.save_to_db(u)
u = User(username = 'random_person', password = "123", email =
'[email protected]')
User.save_to_db(u)
u = User(username = 'jason_spezza', password = "123", email =
'[email protected]')
User.save_to_db(u)
assert len(User.all()) == 3
def test_inserting_duplicate_username(self):
u = User(username = 'jack_nicholson', password = "123", email =
'[email protected]')
User.save_to_db(u);
u2 = User(username = 'jack_nicholson', password = "123", email =
'[email protected]')
assert User.save_to_db(u2) == False
assert len(User.all()) == 1
def test_empty_username(self):
u = User(username = '', password = "123", email =
'[email protected]')
assert User.save_to_db(u) == False
assert len(User.all()) == 0
def test_empty_password(self):
u = User(username = 'jason_spezza', password = "", email =
'[email protected]')
assert User.save_to_db(u) == False
assert len(User.all()) == 0
| mit |
ronakkhunt/kuma | kuma/attachments/tests/test_views.py | 21 | 13816 | import datetime
from nose.tools import eq_, ok_
from django.conf import settings
from django.core.files.base import ContentFile
from django.core.files import temp as tempfile
from django.utils.http import parse_http_date_safe
from constance import config
from jingo.helpers import urlparams
from kuma.users.tests import UserTestCase
from kuma.wiki.models import Document, DocumentAttachment
from kuma.wiki.tests import document, revision, WikiTestCase
from kuma.core.urlresolvers import reverse
from ..models import Attachment, AttachmentRevision
from ..utils import make_test_file
class AttachmentTests(UserTestCase, WikiTestCase):
def setUp(self):
self.old_allowed_types = config.WIKI_ATTACHMENT_ALLOWED_TYPES
config.WIKI_ATTACHMENT_ALLOWED_TYPES = 'text/plain'
super(AttachmentTests, self).setUp()
self.client.login(username='admin', password='testpass')
def tearDown(self):
super(AttachmentTests, self).tearDown()
config.WIKI_ATTACHMENT_ALLOWED_TYPES = self.old_allowed_types
def _post_new_attachment(self):
file_for_upload = make_test_file(
content='A test file uploaded into kuma.')
post_data = {
'title': 'Test uploaded file',
'description': 'A test file uploaded into kuma.',
'comment': 'Initial upload',
'file': file_for_upload,
}
resp = self.client.post(reverse('attachments.new_attachment'), data=post_data)
return resp
def test_legacy_redirect(self):
test_user = self.user_model.objects.get(username='testuser2')
test_file_content = 'Meh meh I am a test file.'
test_files = (
{'file_id': 97, 'filename': 'Canvas_rect.png',
'title': 'Canvas rect', 'slug': 'canvas-rect'},
{'file_id': 107, 'filename': 'Canvas_smiley.png',
'title': 'Canvas smiley', 'slug': 'canvas-smiley'},
{'file_id': 86, 'filename': 'Canvas_lineTo.png',
'title': 'Canvas lineTo', 'slug': 'canvas-lineto'},
{'file_id': 55, 'filename': 'Canvas_arc.png',
'title': 'Canvas arc', 'slug': 'canvas-arc'},
)
for f in test_files:
a = Attachment(title=f['title'], slug=f['slug'],
mindtouch_attachment_id=f['file_id'])
a.save()
now = datetime.datetime.now()
r = AttachmentRevision(
attachment=a,
mime_type='text/plain',
title=f['title'],
slug=f['slug'],
description='',
created=now,
is_approved=True)
r.creator = test_user
r.file.save(f['filename'], ContentFile(test_file_content))
r.make_current()
mindtouch_url = reverse('attachments.mindtouch_file_redirect',
args=(),
kwargs={'file_id': f['file_id'],
'filename': f['filename']})
resp = self.client.get(mindtouch_url)
eq_(301, resp.status_code)
ok_(a.get_file_url() in resp['Location'])
def test_new_attachment(self):
resp = self._post_new_attachment()
eq_(302, resp.status_code)
attachment = Attachment.objects.get(title='Test uploaded file')
eq_(resp['Location'],
'http://testserver%s' % attachment.get_absolute_url())
rev = attachment.current_revision
eq_('admin', rev.creator.username)
eq_('A test file uploaded into kuma.', rev.description)
eq_('Initial upload', rev.comment)
ok_(rev.is_approved)
def test_edit_attachment(self):
file_for_upload = make_test_file(
content='I am a test file for editing.')
post_data = {
'title': 'Test editing file',
'description': 'A test file for editing.',
'comment': 'Initial upload',
'file': file_for_upload,
}
resp = self.client.post(reverse('attachments.new_attachment'), data=post_data)
tdir = tempfile.gettempdir()
edited_file_for_upload = tempfile.NamedTemporaryFile(suffix=".txt",
dir=tdir)
edited_file_for_upload.write(
'I am a new version of the test file for editing.')
edited_file_for_upload.seek(0)
post_data = {
'title': 'Test editing file',
'description': 'A test file for editing.',
'comment': 'Second revision.',
'file': edited_file_for_upload,
}
attachment = Attachment.objects.get(title='Test editing file')
resp = self.client.post(reverse('attachments.edit_attachment',
kwargs={
'attachment_id': attachment.id,
}),
data=post_data)
eq_(302, resp.status_code)
# Re-fetch because it's been updated.
attachment = Attachment.objects.get(title='Test editing file')
eq_(resp['Location'],
'http://testserver%s' % attachment.get_absolute_url())
eq_(2, attachment.revisions.count())
rev = attachment.current_revision
eq_('admin', rev.creator.username)
eq_('Second revision.', rev.comment)
ok_(rev.is_approved)
url = attachment.get_file_url()
resp = self.client.get(url, HTTP_HOST=settings.ATTACHMENT_HOST)
eq_('text/plain', rev.mime_type)
ok_('I am a new version of the test file for editing.' in resp.content)
def test_attachment_raw_requires_attachment_host(self):
resp = self._post_new_attachment()
attachment = Attachment.objects.get(title='Test uploaded file')
url = attachment.get_file_url()
resp = self.client.get(url)
eq_(301, resp.status_code)
eq_(attachment.get_file_url(), resp['Location'])
url = attachment.get_file_url()
resp = self.client.get(url, HTTP_HOST=settings.ATTACHMENT_HOST)
eq_('ALLOW-FROM: %s' % settings.DOMAIN, resp['x-frame-options'])
eq_(200, resp.status_code)
ok_('Last-Modified' in resp)
ok_('1970' not in resp['Last-Modified'])
ok_('GMT' in resp['Last-Modified'])
ok_(parse_http_date_safe(resp['Last-Modified']) is not None)
def test_attachment_detail(self):
file_for_upload = make_test_file(
content='I am a test file for attachment detail view.')
post_data = {
'title': 'Test file for viewing',
'description': 'A test file for viewing.',
'comment': 'Initial upload',
'file': file_for_upload,
}
resp = self.client.post(reverse('attachments.new_attachment'), data=post_data)
attachment = Attachment.objects.get(title='Test file for viewing')
resp = self.client.get(reverse('attachments.attachment_detail',
kwargs={
'attachment_id': attachment.id,
}))
eq_(200, resp.status_code)
def test_get_previous(self):
"""
        AttachmentRevision.get_previous() should return this revision's
        file's most recent approved revision."""
test_user = self.user_model.objects.get(username='testuser2')
a = Attachment(title='Test attachment for get_previous',
slug='test-attachment-for-get-previous')
a.save()
r = AttachmentRevision(
attachment=a,
mime_type='text/plain',
title=a.title,
slug=a.slug,
description='',
comment='Initial revision.',
created=datetime.datetime.now() - datetime.timedelta(seconds=30),
creator=test_user,
is_approved=True)
r.file.save('get_previous_test_file.txt',
ContentFile('I am a test file for get_previous'))
r.save()
r.make_current()
r2 = AttachmentRevision(
attachment=a,
mime_type='text/plain',
title=a.title,
slug=a.slug,
description='',
comment='First edit..',
created=datetime.datetime.now(),
creator=test_user,
is_approved=True)
r2.file.save('get_previous_test_file.txt',
ContentFile('I am a test file for get_previous'))
r2.save()
r2.make_current()
eq_(r, r2.get_previous())
def test_mime_type_filtering(self):
"""Don't allow uploads outside of the explicitly-permitted
mime-types."""
# SLIGHT HACK: this requires the default set of allowed
# mime-types specified in settings.py. Specifically, adding
# 'text/html' to that set will make this test fail.
test_user = self.user_model.objects.get(username='testuser2')
a = Attachment(title='Test attachment for file type filter',
slug='test-attachment-for-file-type-filter')
a.save()
r = AttachmentRevision(
attachment=a,
mime_type='text/plain',
title=a.title,
slug=a.slug,
description='',
comment='Initial revision.',
created=datetime.datetime.now() - datetime.timedelta(seconds=30),
creator=test_user,
is_approved=True)
r.file.save('mime_type_filter_test_file.txt',
ContentFile('I am a test file for mime-type filtering'))
# Shamelessly stolen from Django's own file-upload tests.
tdir = tempfile.gettempdir()
file_for_upload = tempfile.NamedTemporaryFile(suffix=".html",
dir=tdir)
file_for_upload.write('<html>I am a file that tests'
'mime-type filtering.</html>.')
file_for_upload.seek(0)
post_data = {
'title': 'Test disallowed file type',
'description': 'A file kuma should disallow on type.',
'comment': 'Initial upload',
'file': file_for_upload,
}
resp = self.client.post(reverse('attachments.edit_attachment',
kwargs={'attachment_id': a.id}),
data=post_data)
eq_(200, resp.status_code)
ok_('Files of this type are not permitted.' in resp.content)
def test_intermediate(self):
"""
Test that the intermediate DocumentAttachment gets created
correctly when adding an Attachment with a document_id.
"""
doc = document(locale='en', slug='attachment-test-intermediate')
doc.save()
rev = revision(document=doc, is_approved=True)
rev.save()
file_for_upload = make_test_file(
content='A file for testing intermediate attachment model.')
post_data = {
'title': 'Intermediate test file',
'description': 'Intermediate test file',
'comment': 'Initial upload',
'file': file_for_upload,
}
add_url = urlparams(reverse('attachments.new_attachment'),
document_id=doc.id)
resp = self.client.post(add_url, data=post_data)
eq_(302, resp.status_code)
eq_(1, doc.files.count())
intermediates = DocumentAttachment.objects.filter(document__pk=doc.id)
eq_(1, intermediates.count())
intermediate = intermediates[0]
eq_('admin', intermediate.attached_by.username)
eq_(file_for_upload.name.split('/')[-1], intermediate.name)
def test_files_dict(self):
doc = document(locale='en', slug='attachment-test-files-dict')
doc.save()
rev = revision(document=doc, is_approved=True)
rev.save()
test_file_1 = make_test_file(
content='A file for testing the files dict')
post_data = {
'title': 'Files dict test file',
'description': 'Files dict test file',
'comment': 'Initial upload',
'file': test_file_1,
}
add_url = urlparams(reverse('attachments.new_attachment'),
document_id=doc.id)
self.client.post(add_url, data=post_data)
test_file_2 = make_test_file(
content='Another file for testing the files dict')
post_data = {
'title': 'Files dict test file 2',
'description': 'Files dict test file 2',
'comment': 'Initial upload',
'file': test_file_2,
}
self.client.post(add_url, data=post_data)
doc = Document.objects.get(pk=doc.id)
files_dict = doc.files_dict()
file1 = files_dict[test_file_1.name.split('/')[-1]]
eq_('admin', file1['attached_by'])
eq_('Files dict test file', file1['description'])
eq_('text/plain', file1['mime_type'])
ok_(test_file_1.name.split('/')[-1] in file1['url'])
file2 = files_dict[test_file_2.name.split('/')[-1]]
eq_('admin', file2['attached_by'])
eq_('Files dict test file 2', file2['description'])
eq_('text/plain', file2['mime_type'])
ok_(test_file_2.name.split('/')[-1] in file2['url'])
def test_list_files(self):
list_files_url = reverse('attachments.list_files',
locale=settings.WIKI_DEFAULT_LANGUAGE)
resp = self.client.get(list_files_url)
eq_(200, resp.status_code)
ok_('All Files' in resp.content)
| mpl-2.0 |
vtexier/duniter-python-api | tests/documents/test_peer.py | 1 | 4506 | import unittest
from duniterpy.documents.peer import Peer, BMAEndpoint, UnknownEndpoint, WS2PEndpoint
rawpeer = """Version: 2
Type: Peer
Currency: beta_brousouf
PublicKey: HsLShAtzXTVxeUtQd7yi5Z5Zh4zNvbu8sTEZ53nfKcqY
Block: 8-1922C324ABC4AF7EF7656734A31F5197888DDD52
Endpoints:
BASIC_MERKLED_API some.dns.name 88.77.66.55 2001:42d0:52:a00::648 9001
BASIC_MERKLED_API some.dns.name 88.77.66.55 2001:42d0:52:a00::648 9002
WS2P d2edcb92 g1-test.duniter.org 20902
OTHER_PROTOCOL 88.77.66.55 9001
dkaXIiCYUJtCg8Feh/BKvPYf4uFH9CJ/zY6J4MlA9BsjmcMe4YAblvNt/gJy31b1aGq3ue3h14mLMCu84rraDg==
"""
test_weird_ipv6_peer = """Version: 10
Type: Peer
Currency: g1
PublicKey: 6fFt4zdvtNyVcfJn7Y41mKLmMDizyK3nVeNW3qdDXzpc
Block: 18198-000004AC710E04D8015ED6CA5D87D4B6620A7551233FFEE1B521FF756CE3B9CD
Endpoints:
BASIC_MERKLED_API duniter.aquilenet.fr 141.255.128.35 2a01:474::35 10901
BMAS duniter.aquilenet.fr 443
dkaXIiCYUJtCg8Feh/BKvPYf4uFH9CJ/zY6J4MlA9BsjmcMe4YAblvNt/gJy31b1aGq3ue3h14mLMCu84rraDg==
"""
class TestPeer(unittest.TestCase):
def test_fromraw(self):
peer = Peer.from_signed_raw(rawpeer)
self.assertEqual(peer.currency, "beta_brousouf")
self.assertEqual(peer.pubkey, "HsLShAtzXTVxeUtQd7yi5Z5Zh4zNvbu8sTEZ53nfKcqY")
self.assertEqual(str(peer.blockUID), "8-1922C324ABC4AF7EF7656734A31F5197888DDD52")
self.assertEqual(len(peer.endpoints), 4)
self.assertIsInstance(peer.endpoints[0], BMAEndpoint)
self.assertIsInstance(peer.endpoints[1], BMAEndpoint)
self.assertIsInstance(peer.endpoints[2], WS2PEndpoint)
self.assertIsInstance(peer.endpoints[3], UnknownEndpoint)
self.assertEqual(peer.endpoints[0].server, "some.dns.name")
self.assertEqual(peer.endpoints[0].ipv4, "88.77.66.55")
self.assertEqual(peer.endpoints[0].ipv6, "2001:42d0:52:a00::648")
self.assertEqual(peer.endpoints[0].port, 9001)
self.assertEqual(peer.endpoints[1].server, "some.dns.name")
self.assertEqual(peer.endpoints[1].ipv4, "88.77.66.55")
self.assertEqual(peer.endpoints[1].ipv6, "2001:42d0:52:a00::648")
self.assertEqual(peer.endpoints[1].port, 9002)
self.assertEqual(peer.endpoints[2].server, "g1-test.duniter.org")
self.assertEqual(peer.endpoints[2].ws2pid, "d2edcb92")
self.assertEqual(peer.endpoints[2].port, 20902)
self.assertEqual(peer.signatures[0], "dkaXIiCYUJtCg8Feh/BKvPYf4uFH9CJ/zY6J4MlA9BsjmcMe4YAblvNt/gJy31b1aGq3ue3h14mLMCu84rraDg==")
def test_fromraw_toraw(self):
peer = Peer.from_signed_raw(rawpeer)
rendered_peer = peer.signed_raw()
from_rendered_peer = Peer.from_signed_raw(rendered_peer)
self.assertEqual(from_rendered_peer.currency, "beta_brousouf")
self.assertEqual(from_rendered_peer.pubkey, "HsLShAtzXTVxeUtQd7yi5Z5Zh4zNvbu8sTEZ53nfKcqY")
self.assertEqual(str(from_rendered_peer.blockUID), "8-1922C324ABC4AF7EF7656734A31F5197888DDD52")
self.assertEqual(len(peer.endpoints), 4)
self.assertIsInstance(peer.endpoints[0], BMAEndpoint)
self.assertIsInstance(peer.endpoints[1], BMAEndpoint)
self.assertIsInstance(peer.endpoints[2], WS2PEndpoint)
self.assertIsInstance(peer.endpoints[3], UnknownEndpoint)
self.assertEqual(from_rendered_peer.endpoints[0].server, "some.dns.name")
self.assertEqual(from_rendered_peer.endpoints[0].ipv4, "88.77.66.55")
self.assertEqual(from_rendered_peer.endpoints[0].ipv6, "2001:42d0:52:a00::648")
self.assertEqual(from_rendered_peer.endpoints[0].port, 9001)
self.assertEqual(from_rendered_peer.endpoints[1].server, "some.dns.name")
self.assertEqual(from_rendered_peer.endpoints[1].ipv4, "88.77.66.55")
self.assertEqual(from_rendered_peer.endpoints[1].ipv6, "2001:42d0:52:a00::648")
self.assertEqual(from_rendered_peer.endpoints[1].port, 9002)
self.assertEqual(peer.endpoints[2].server, "g1-test.duniter.org")
self.assertEqual(peer.endpoints[2].ws2pid, "d2edcb92")
self.assertEqual(peer.endpoints[2].port, 20902)
self.assertEqual(from_rendered_peer.signatures[0], "dkaXIiCYUJtCg8Feh/BKvPYf4uFH9CJ/zY6J4MlA9BsjmcMe4YAblvNt/gJy31b1aGq3ue3h14mLMCu84rraDg==")
self.assertEqual(rawpeer, from_rendered_peer.signed_raw())
def test_incorrect(self):
peer = Peer.from_signed_raw(test_weird_ipv6_peer)
rendered_peer = peer.signed_raw()
from_rendered_peer = Peer.from_signed_raw(rendered_peer)
| gpl-3.0 |
ecaldwe1/zika | website/apps/home/views/upload_job_view.py | 2 | 1693 | #!/bin/env python3.4
# -*- coding: utf-8 -*-
#
# This file is part of the VecNet Zika modeling interface.
# For copyright and licensing information about this package, see the
# NOTICE.txt and LICENSE.txt files in its top-level directory; they are
# available at https://github.com/vecnet/zika
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License (MPL), version 2.0. If a copy of the MPL was not distributed
# with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
from django.core.exceptions import PermissionDenied
from django.http.response import HttpResponse, JsonResponse
from django.views.decorators.csrf import csrf_exempt
from rest_framework.parsers import JSONParser
from website.apps.home.models import UploadJob
from website.apps.home.serializers import UploadJobSerializer
@csrf_exempt
def upload_job_view(request, pk):
"""
Retrieve, update or delete an upload job
"""
if not request.user.is_superuser:
raise PermissionDenied
try:
upload_job = UploadJob.objects.get(pk=pk)
except UploadJob.DoesNotExist:
return HttpResponse(status=404)
if request.method == 'GET':
serializer = UploadJobSerializer(upload_job)
return JsonResponse(serializer.data)
elif request.method == 'PUT':
data = JSONParser().parse(request)
serializer = UploadJobSerializer(upload_job, data=data)
if serializer.is_valid():
serializer.save()
return JsonResponse(serializer.data)
return JsonResponse(serializer.errors, status=400)
elif request.method == 'DELETE':
upload_job.delete()
return HttpResponse(status=204)
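# Minimal usage sketch (illustrative only, not part of the app): the view is
# normally exercised through Django's test client by a superuser. The URL and
# payload below are hypothetical examples.
def _upload_job_view_example(client):
    response = client.get("/api/upload_job/1/")           # serialized job
    client.put("/api/upload_job/1/", data='{"status": "done"}',
               content_type="application/json")           # update via PUT
    client.delete("/api/upload_job/1/")                    # returns 204
    return response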
| mpl-2.0 |
kmike/scrapy | scrapy/utils/engine.py | 180 | 1376 | """Some debugging functions for working with the Scrapy engine"""
from __future__ import print_function
from time import time # used in global tests code
def get_engine_status(engine):
"""Return a report of the current engine status"""
tests = [
"time()-engine.start_time",
"engine.has_capacity()",
"len(engine.downloader.active)",
"engine.scraper.is_idle()",
"engine.spider.name",
"engine.spider_is_idle(engine.spider)",
"engine.slot.closing",
"len(engine.slot.inprogress)",
"len(engine.slot.scheduler.dqs or [])",
"len(engine.slot.scheduler.mqs)",
"len(engine.scraper.slot.queue)",
"len(engine.scraper.slot.active)",
"engine.scraper.slot.active_size",
"engine.scraper.slot.itemproc_size",
"engine.scraper.slot.needs_backout()",
]
checks = []
for test in tests:
try:
checks += [(test, eval(test))]
except Exception as e:
checks += [(test, "%s (exception)" % type(e).__name__)]
return checks
def format_engine_status(engine=None):
checks = get_engine_status(engine)
s = "Execution engine status\n\n"
for test, result in checks:
s += "%-47s : %s\n" % (test, result)
s += "\n"
return s
def print_engine_status(engine):
print(format_engine_status(engine))
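# Illustrative usage sketch (not part of Scrapy): the helpers above expect a
# running ExecutionEngine, typically reached through a Crawler object, e.g.
# from an extension or the telnet console. `crawler` below is assumed to be a
# scrapy.crawler.Crawler whose engine has already been started.
def _dump_engine_status(crawler):
    # Return the formatted report rather than printing it.
    return format_engine_status(crawler.engine)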
| bsd-3-clause |
wildtetris/python-social-auth | examples/flask_example/settings.py | 51 | 2264 | from os.path import dirname, abspath
SECRET_KEY = 'random-secret-key'
SESSION_COOKIE_NAME = 'psa_session'
DEBUG = True
SQLALCHEMY_DATABASE_URI = 'sqlite:////%s/test.db' % dirname(abspath(__file__))
DEBUG_TB_INTERCEPT_REDIRECTS = False
SESSION_PROTECTION = 'strong'
SOCIAL_AUTH_LOGIN_URL = '/'
SOCIAL_AUTH_LOGIN_REDIRECT_URL = '/done/'
SOCIAL_AUTH_USER_MODEL = 'flask_example.models.user.User'
SOCIAL_AUTH_AUTHENTICATION_BACKENDS = (
'social.backends.open_id.OpenIdAuth',
'social.backends.google.GoogleOpenId',
'social.backends.google.GoogleOAuth2',
'social.backends.google.GoogleOAuth',
'social.backends.twitter.TwitterOAuth',
'social.backends.yahoo.YahooOpenId',
'social.backends.stripe.StripeOAuth2',
'social.backends.persona.PersonaAuth',
'social.backends.facebook.FacebookOAuth2',
'social.backends.facebook.FacebookAppOAuth2',
'social.backends.yahoo.YahooOAuth',
'social.backends.angel.AngelOAuth2',
'social.backends.behance.BehanceOAuth2',
'social.backends.bitbucket.BitbucketOAuth',
'social.backends.box.BoxOAuth2',
'social.backends.linkedin.LinkedinOAuth',
'social.backends.github.GithubOAuth2',
'social.backends.foursquare.FoursquareOAuth2',
'social.backends.instagram.InstagramOAuth2',
'social.backends.live.LiveOAuth2',
'social.backends.vk.VKOAuth2',
'social.backends.dailymotion.DailymotionOAuth2',
'social.backends.disqus.DisqusOAuth2',
'social.backends.dropbox.DropboxOAuth',
'social.backends.eveonline.EVEOnlineOAuth2',
'social.backends.evernote.EvernoteSandboxOAuth',
'social.backends.fitbit.FitbitOAuth',
'social.backends.flickr.FlickrOAuth',
'social.backends.livejournal.LiveJournalOpenId',
'social.backends.soundcloud.SoundcloudOAuth2',
'social.backends.thisismyjam.ThisIsMyJamOAuth1',
'social.backends.stocktwits.StocktwitsOAuth2',
'social.backends.tripit.TripItOAuth',
'social.backends.clef.ClefOAuth2',
'social.backends.twilio.TwilioAuth',
'social.backends.xing.XingOAuth',
'social.backends.yandex.YandexOAuth2',
'social.backends.podio.PodioOAuth2',
'social.backends.reddit.RedditOAuth2',
'social.backends.mineid.MineIDOAuth2',
'social.backends.wunderlist.WunderlistOAuth2',
)
| bsd-3-clause |
ropik/chromium | chrome/common/extensions/docs/examples/apps/hello-python/main.py | 148 | 5230 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from google.appengine.ext import webapp
from google.appengine.ext.webapp import util
from google.appengine.api import users
from google.appengine.api import urlfetch
from google.appengine.ext.webapp import template
from google.appengine.api.urlfetch import DownloadError
import oauth2
import urllib
import logging
import os
import time
from django.utils import simplejson
# Configuration
CONFIG = {
'oauth_consumer_key': 'anonymous',
'oauth_consumer_secret': 'anonymous',
'license_server': 'https://www.googleapis.com',
'license_path': '%(server)s/chromewebstore/v1/licenses/%(appid)s/%(userid)s',
'oauth_token': 'INSERT OAUTH TOKEN HERE',
'oauth_token_secret': 'INSERT OAUTH TOKEN SECRET HERE',
'app_id': 'INSERT APPLICATION ID HERE',
}
# Check to see if the server has been deployed. In the dev server, this
# env variable will start with 'Development', in production, it will start with
# 'Google App Engine'
IS_PRODUCTION = os.environ['SERVER_SOFTWARE'].startswith('Google App Engine')
# Valid access levels that may be returned by the license server.
VALID_ACCESS_LEVELS = ['FREE_TRIAL', 'FULL']
def fetch_license_data(userid):
"""Fetches the license for a given user by making an OAuth signed request
to the license server.
Args:
userid OpenID of the user you are checking access for.
Returns:
The server's response as text.
"""
url = CONFIG['license_path'] % {
'server': CONFIG['license_server'],
'appid': CONFIG['app_id'],
'userid': urllib.quote_plus(userid),
}
oauth_token = oauth2.Token(**{
'key': CONFIG['oauth_token'],
'secret': CONFIG['oauth_token_secret']
})
oauth_consumer = oauth2.Consumer(**{
'key': CONFIG['oauth_consumer_key'],
'secret': CONFIG['oauth_consumer_secret']
})
logging.debug('Requesting %s' % url)
client = oauth2.Client(oauth_consumer, oauth_token)
resp, content = client.request(url, 'GET')
logging.debug('Got response code %s, content %s' % (resp, content))
return content
def parse_license_data(userid):
"""Returns the license for a given user as a structured object.
Args:
userid: The OpenID of the user to check.
Returns:
An object with the following parameters:
error: True if something went wrong, False otherwise.
message: A descriptive message if error is True.
access: One of 'NO', 'FREE_TRIAL', or 'FULL' depending on the access.
"""
license = {'error': False, 'message': '', 'access': 'NO'}
try:
response_text = fetch_license_data(userid)
try:
logging.debug('Attempting to JSON parse: %s' % response_text)
json = simplejson.loads(response_text)
logging.debug('Got license server response: %s' % json)
except ValueError:
logging.exception('Could not parse response as JSON: %s' % response_text)
license['error'] = True
license['message'] = 'Could not parse the license server response'
except DownloadError:
logging.exception('Could not fetch license data')
license['error'] = True
license['message'] = 'Could not fetch license data'
  if not license['error']:
    # Guard the lookups below: `json` is undefined if the fetch or parse failed.
    if json.has_key('error'):
      license['error'] = True
      license['message'] = json['error']['message']
    elif json['result'] == 'YES' and json['accessLevel'] in VALID_ACCESS_LEVELS:
      license['access'] = json['accessLevel']
return license
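# For reference (inferred from the parsing above, values are illustrative): a
# successful license server response looks roughly like
#   {"result": "YES", "accessLevel": "FULL"}
# which yields {'error': False, 'message': '', 'access': 'FULL'}, while an
# error response carries {"error": {"message": "..."}} and sets
# license['error'] = True with that message.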
class MainHandler(webapp.RequestHandler):
"""Request handler class."""
def get(self):
"""Handler for GET requests."""
user = users.get_current_user()
if user:
if IS_PRODUCTION:
# We should use federated_identity in production, since the license
# server requires an OpenID
userid = user.federated_identity()
else:
# On the dev server, we won't have access to federated_identity, so
# just use a default OpenID which will never return YES.
# If you want to test different response values on the development
# server, just change this default value (e.g. append '-yes' or
# '-trial').
userid = ('https://www.google.com/accounts/o8/id?'
'id=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx')
license_data = parse_license_data(userid)
template_data = {
'license': license_data,
'user_name': user.nickname(),
'user_id': userid,
'user_logout': users.create_logout_url(self.request.uri),
}
else:
# Force the OpenID login endpoint to be for Google accounts only, since
# the license server doesn't support any other type of OpenID provider.
login_url = users.create_login_url(dest_url='/',
federated_identity='google.com/accounts/o8/id')
template_data = {
'user_login': login_url,
}
# Render a simple template
path = os.path.join(os.path.dirname(__file__), 'templates', 'index.html')
self.response.out.write(template.render(path, template_data))
if __name__ == '__main__':
application = webapp.WSGIApplication([
('/', MainHandler),
], debug=False)
util.run_wsgi_app(application)
| bsd-3-clause |
Ramalus/herovoices | .buildozer/venv/lib/python2.7/site-packages/pip/_vendor/requests/status_codes.py | 926 | 3200 | # -*- coding: utf-8 -*-
from .structures import LookupDict
_codes = {
# Informational.
100: ('continue',),
101: ('switching_protocols',),
102: ('processing',),
103: ('checkpoint',),
122: ('uri_too_long', 'request_uri_too_long'),
200: ('ok', 'okay', 'all_ok', 'all_okay', 'all_good', '\\o/', '✓'),
201: ('created',),
202: ('accepted',),
203: ('non_authoritative_info', 'non_authoritative_information'),
204: ('no_content',),
205: ('reset_content', 'reset'),
206: ('partial_content', 'partial'),
207: ('multi_status', 'multiple_status', 'multi_stati', 'multiple_stati'),
208: ('already_reported',),
226: ('im_used',),
# Redirection.
300: ('multiple_choices',),
301: ('moved_permanently', 'moved', '\\o-'),
302: ('found',),
303: ('see_other', 'other'),
304: ('not_modified',),
305: ('use_proxy',),
306: ('switch_proxy',),
307: ('temporary_redirect', 'temporary_moved', 'temporary'),
308: ('permanent_redirect',
'resume_incomplete', 'resume',), # These 2 to be removed in 3.0
# Client Error.
400: ('bad_request', 'bad'),
401: ('unauthorized',),
402: ('payment_required', 'payment'),
403: ('forbidden',),
404: ('not_found', '-o-'),
405: ('method_not_allowed', 'not_allowed'),
406: ('not_acceptable',),
407: ('proxy_authentication_required', 'proxy_auth', 'proxy_authentication'),
408: ('request_timeout', 'timeout'),
409: ('conflict',),
410: ('gone',),
411: ('length_required',),
412: ('precondition_failed', 'precondition'),
413: ('request_entity_too_large',),
414: ('request_uri_too_large',),
415: ('unsupported_media_type', 'unsupported_media', 'media_type'),
416: ('requested_range_not_satisfiable', 'requested_range', 'range_not_satisfiable'),
417: ('expectation_failed',),
418: ('im_a_teapot', 'teapot', 'i_am_a_teapot'),
422: ('unprocessable_entity', 'unprocessable'),
423: ('locked',),
424: ('failed_dependency', 'dependency'),
425: ('unordered_collection', 'unordered'),
426: ('upgrade_required', 'upgrade'),
428: ('precondition_required', 'precondition'),
429: ('too_many_requests', 'too_many'),
431: ('header_fields_too_large', 'fields_too_large'),
444: ('no_response', 'none'),
449: ('retry_with', 'retry'),
450: ('blocked_by_windows_parental_controls', 'parental_controls'),
451: ('unavailable_for_legal_reasons', 'legal_reasons'),
499: ('client_closed_request',),
# Server Error.
500: ('internal_server_error', 'server_error', '/o\\', '✗'),
501: ('not_implemented',),
502: ('bad_gateway',),
503: ('service_unavailable', 'unavailable'),
504: ('gateway_timeout',),
505: ('http_version_not_supported', 'http_version'),
506: ('variant_also_negotiates',),
507: ('insufficient_storage',),
509: ('bandwidth_limit_exceeded', 'bandwidth'),
510: ('not_extended',),
}
codes = LookupDict(name='status_codes')
for (code, titles) in list(_codes.items()):
for title in titles:
setattr(codes, title, code)
if not title.startswith('\\'):
setattr(codes, title.upper(), code)
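# Quick illustration (not part of the upstream module): after the loop above,
# every alias resolves to its numeric status code via attribute access.
def _codes_example():
    assert codes.ok == 200
    assert codes.NOT_FOUND == 404
    assert codes.teapot == 418
    return codes.ok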
| gpl-2.0 |
Milad-Rakhsha/chrono | src/demos/python/irrlicht/demo_IRR_callbackSMC.py | 3 | 6973 | # =============================================================================
# PROJECT CHRONO - http:#projectchrono.org
#
# Copyright (c) 2014 projectchrono.org
# All rights reserved.
#
# Use of this source code is governed by a BSD-style license that can be found
# in the LICENSE file at the top level of the distribution and at
# http:#projectchrono.org/license-chrono.txt.
#
# =============================================================================
# Authors: Simone Benatti
# =============================================================================
#
# Chrono demonstration of using contact callbacks for smooth contacts
# (penalty-based).
#
# The global reference frame has Y up.
#
# =============================================================================
import pychrono as chrono
import pychrono.irrlicht as chronoirr
# -----------------------------------------------------------------------------
# Callback class for contact reporting
# -----------------------------------------------------------------------------
class ContactReporter (chrono.ReportContactCallback):
def __init__(self, box1, box2) :
self.m_box1 = box1
self.m_box2 = box2
super().__init__()
def OnReportContact( self,
pA,
pB,
plane_coord,
distance,
eff_radius,
cforce,
ctorque,
modA,
modB):
frc = plane_coord * cforce;
bodyA = chrono.CastToChBody(modA)
bodyB = chrono.CastToChBody(modB)
if (bodyA == self.m_box1) :
print(" contact on Box 1 at pos: ", pA.x, pA.y, pA.z)
print(" frc: ", frc.x, frc.y, frc.z)
elif (bodyB == self.m_box1) :
print(" contact on Box 1 at pos: ", pB.x, pB.y, pB.z)
print(" frc: ", frc.x, frc.y, frc.z)
if (bodyA == self.m_box2) :
print(" contact on Box 2 at pos: ", pA.x, pA.y, pA.z)
print(" frc: ", frc.x, frc.y, frc.z)
elif (bodyB == self.m_box2) :
print(" contact on Box 2 at pos: ", pB.x, pB.y, pB.z)
print(" frc: ", frc.x, frc.y, frc.z)
return True
# -----------------------------------------------------------------------------
# Callback class for modifying composite material
# -----------------------------------------------------------------------------
class ContactMaterial(chrono.AddContactCallback):
def __init__(self):
super().__init__()
def OnAddContact( self,
contactinfo,
material):
# Downcast to appropriate composite material type
mat = chrono.CastToChMaterialCompositeSMC(material)
# Set different friction for left/right halfs
if (contactinfo.vpA.z > 0) :
friction = 0.3
else:
friction = 0.8
mat.mu_eff = friction
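# Note: both callback objects defined above only take effect once they are
# hooked up to the system's contact container, as done further below via
# RegisterAddContactCallback() and by passing the reporter to ReportAllContacts().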
print( "Copyright (c) 2017 projectchrono.org")
# ----------------
# Parameters
# ----------------
friction = 0.6
# -----------------
# Create the system
# -----------------
system = chrono.ChSystemSMC()
system.Set_G_acc(chrono.ChVectorD(0, -10, 0))
# Set solver settings
system.SetSolverMaxIterations(100)
system.SetSolverForceTolerance(0)
# --------------------------------------------------
# Create a contact material, shared among all bodies
# --------------------------------------------------
material = chrono.ChMaterialSurfaceSMC()
material.SetFriction(friction)
# ----------
# Add bodies
# ----------
container = chrono.ChBody()
system.Add(container)
container.SetPos(chrono.ChVectorD(0, 0, 0))
container.SetBodyFixed(True)
container.SetIdentifier(-1)
container.SetCollide(True)
container.GetCollisionModel().ClearModel()
chrono.AddBoxGeometry(container, material, chrono.ChVectorD(4, 0.5, 4), chrono.ChVectorD(0, -0.5, 0))
container.GetCollisionModel().BuildModel()
container.AddAsset(chrono.ChColorAsset(chrono.ChColor(0.4, 0.4, 0.4)))
box1 = chrono.ChBody()
box1.SetMass(10)
box1.SetInertiaXX(chrono.ChVectorD(1, 1, 1))
box1.SetPos(chrono.ChVectorD(-1, 0.21, -1))
box1.SetPos_dt(chrono.ChVectorD(5, 0, 0))
box1.SetCollide(True)
box1.GetCollisionModel().ClearModel()
chrono.AddBoxGeometry(box1, material, chrono.ChVectorD(0.4, 0.2, 0.1))
box1.GetCollisionModel().BuildModel()
box1.AddAsset(chrono.ChColorAsset(chrono.ChColor(0.1, 0.1, 0.4)))
system.AddBody(box1)
box2 = chrono.ChBody(system.NewBody())
box2.SetMass(10)
box2.SetInertiaXX(chrono.ChVectorD(1, 1, 1))
box2.SetPos(chrono.ChVectorD(-1, 0.21, +1))
box2.SetPos_dt(chrono.ChVectorD(5, 0, 0))
box2.SetCollide(True)
box2.GetCollisionModel().ClearModel()
chrono.AddBoxGeometry(box2, material, chrono.ChVectorD(0.4, 0.2, 0.1))
box2.GetCollisionModel().BuildModel()
box2.AddAsset(chrono.ChColorAsset(chrono.ChColor(0.4, 0.1, 0.1)))
system.AddBody(box2)
# -------------------------------
# Create the visualization window
# -------------------------------
application = chronoirr.ChIrrApp(system, "SMC callbacks", chronoirr.dimension2du(800, 600))
application.AddTypicalLogo(chrono.GetChronoDataFile('logo_pychrono_alpha.png'))
application.AddTypicalSky()
application.AddTypicalLights()
application.AddTypicalCamera(chronoirr.vector3df(4, 4, -6))
application.AssetBindAll()
application.AssetUpdateAll()
# ---------------
# Simulate system
# ---------------
creporter = ContactReporter(box1, box2)
cmaterial = ContactMaterial()
system.GetContactContainer().RegisterAddContactCallback(cmaterial)
application.SetTimestep(1e-3)
while (application.GetDevice().run()) :
application.BeginScene(True, True, chronoirr.SColor(255, 140, 161, 192))
application.DrawAll()
chronoirr.drawGrid(application.GetVideoDriver(), 0.5, 0.5, 12, 12,
chrono.ChCoordsysD(chrono.ChVectorD(0, 0, 0), chrono.Q_from_AngX(chrono.CH_C_PI_2)))
chronoirr.drawAllCOGs(system, application.GetVideoDriver(), 1.0)
application.DoStep()
application.EndScene()
# Process contacts
print(str(system.GetChTime() ) + " " + str(system.GetNcontacts()) )
system.GetContactContainer().ReportAllContacts(creporter)
# Cumulative contact force and torque on boxes (as applied to COM)
frc1 = box1.GetContactForce()
trq1 = box1.GetContactTorque()
print(" Box 1 contact force at COM: ", frc1.x, frc1.y, frc1.z)
print(" contact torque at COM: ", trq1.x, trq1.y, trq1.z)
frc2 = box2.GetContactForce()
trq2 = box2.GetContactTorque()
print(" Box 2 contact force at COM: ", frc2.x, frc2.y, frc2.z)
print(" contact torque at COM: ", trq2.x, trq2.y, trq2.z)
| bsd-3-clause |
lyft/incubator-airflow | airflow/models/base.py | 4 | 1248 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Any
from sqlalchemy import MetaData
from sqlalchemy.ext.declarative import declarative_base
from airflow.configuration import conf
SQL_ALCHEMY_SCHEMA = conf.get("core", "SQL_ALCHEMY_SCHEMA")
metadata = (
None
if not SQL_ALCHEMY_SCHEMA or SQL_ALCHEMY_SCHEMA.isspace()
else MetaData(schema=SQL_ALCHEMY_SCHEMA)
)
Base = declarative_base(metadata=metadata) # type: Any
ID_LEN = 250
# used for typing
class Operator:
pass
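# Minimal sketch (not an actual Airflow model): modules that define ORM models
# subclass Base and typically size identifier columns with ID_LEN. The table
# name and column below are hypothetical examples.
from sqlalchemy import Column, String
class ExampleEntry(Base):
    __tablename__ = "example_entry"
    name = Column(String(ID_LEN), primary_key=True)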
| apache-2.0 |
mixianghang/mptcp | scripts/analyze_suspend.py | 445 | 49020 | #!/usr/bin/python
#
# Tool for analyzing suspend/resume timing
# Copyright (c) 2013, Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
#
# Authors:
# Todd Brandt <[email protected]>
#
# Description:
# This tool is designed to assist kernel and OS developers in optimizing
# their linux stack's suspend/resume time. Using a kernel image built
# with a few extra options enabled, the tool will execute a suspend and
# will capture dmesg and ftrace data until resume is complete. This data
# is transformed into a device timeline and a callgraph to give a quick
# and detailed view of which devices and callbacks are taking the most
# time in suspend/resume. The output is a single html file which can be
# viewed in firefox or chrome.
#
# The following kernel build options are required:
# CONFIG_PM_DEBUG=y
# CONFIG_PM_SLEEP_DEBUG=y
# CONFIG_FTRACE=y
# CONFIG_FUNCTION_TRACER=y
# CONFIG_FUNCTION_GRAPH_TRACER=y
#
# The following additional kernel parameters are required:
# (e.g. in file /etc/default/grub)
# GRUB_CMDLINE_LINUX_DEFAULT="... initcall_debug log_buf_len=16M ..."
#
import sys
import time
import os
import string
import re
import array
import platform
import datetime
import struct
# -- classes --
class SystemValues:
testdir = "."
tpath = "/sys/kernel/debug/tracing/"
mempath = "/dev/mem"
powerfile = "/sys/power/state"
suspendmode = "mem"
prefix = "test"
teststamp = ""
dmesgfile = ""
ftracefile = ""
htmlfile = ""
rtcwake = False
def setOutputFile(self):
if((self.htmlfile == "") and (self.dmesgfile != "")):
m = re.match(r"(?P<name>.*)_dmesg\.txt$", self.dmesgfile)
if(m):
self.htmlfile = m.group("name")+".html"
if((self.htmlfile == "") and (self.ftracefile != "")):
m = re.match(r"(?P<name>.*)_ftrace\.txt$", self.ftracefile)
if(m):
self.htmlfile = m.group("name")+".html"
if(self.htmlfile == ""):
self.htmlfile = "output.html"
def initTestOutput(self):
hostname = platform.node()
if(hostname != ""):
self.prefix = hostname
v = os.popen("cat /proc/version").read().strip()
kver = string.split(v)[2]
self.testdir = os.popen("date \"+suspend-%m%d%y-%H%M%S\"").read().strip()
self.teststamp = "# "+self.testdir+" "+self.prefix+" "+self.suspendmode+" "+kver
self.dmesgfile = self.testdir+"/"+self.prefix+"_"+self.suspendmode+"_dmesg.txt"
self.ftracefile = self.testdir+"/"+self.prefix+"_"+self.suspendmode+"_ftrace.txt"
self.htmlfile = self.testdir+"/"+self.prefix+"_"+self.suspendmode+".html"
os.mkdir(self.testdir)
class Data:
altdevname = dict()
usedmesg = False
useftrace = False
notestrun = False
verbose = False
phases = []
dmesg = {} # root data structure
start = 0.0
end = 0.0
stamp = {'time': "", 'host': "", 'mode': ""}
id = 0
tSuspended = 0.0
fwValid = False
fwSuspend = 0
fwResume = 0
def initialize(self):
self.dmesg = { # dmesg log data
'suspend_general': {'list': dict(), 'start': -1.0, 'end': -1.0,
'row': 0, 'color': "#CCFFCC", 'order': 0},
'suspend_early': {'list': dict(), 'start': -1.0, 'end': -1.0,
'row': 0, 'color': "green", 'order': 1},
'suspend_noirq': {'list': dict(), 'start': -1.0, 'end': -1.0,
'row': 0, 'color': "#00FFFF", 'order': 2},
'suspend_cpu': {'list': dict(), 'start': -1.0, 'end': -1.0,
'row': 0, 'color': "blue", 'order': 3},
'resume_cpu': {'list': dict(), 'start': -1.0, 'end': -1.0,
'row': 0, 'color': "red", 'order': 4},
'resume_noirq': {'list': dict(), 'start': -1.0, 'end': -1.0,
'row': 0, 'color': "orange", 'order': 5},
'resume_early': {'list': dict(), 'start': -1.0, 'end': -1.0,
'row': 0, 'color': "yellow", 'order': 6},
'resume_general': {'list': dict(), 'start': -1.0, 'end': -1.0,
'row': 0, 'color': "#FFFFCC", 'order': 7}
}
self.phases = self.sortedPhases()
def normalizeTime(self):
tSus = tRes = self.tSuspended
if self.fwValid:
tSus -= -self.fwSuspend / 1000000000.0
tRes -= self.fwResume / 1000000000.0
self.tSuspended = 0.0
self.start -= tSus
self.end -= tRes
for phase in self.phases:
zero = tRes
if "suspend" in phase:
zero = tSus
p = self.dmesg[phase]
p['start'] -= zero
p['end'] -= zero
list = p['list']
for name in list:
d = list[name]
d['start'] -= zero
d['end'] -= zero
if('ftrace' in d):
cg = d['ftrace']
cg.start -= zero
cg.end -= zero
for line in cg.list:
line.time -= zero
if self.fwValid:
fws = -self.fwSuspend / 1000000000.0
fwr = self.fwResume / 1000000000.0
list = dict()
self.id += 1
devid = "dc%d" % self.id
list["firmware-suspend"] = \
{'start': fws, 'end': 0, 'pid': 0, 'par': "",
'length': -fws, 'row': 0, 'id': devid };
self.id += 1
devid = "dc%d" % self.id
list["firmware-resume"] = \
{'start': 0, 'end': fwr, 'pid': 0, 'par': "",
'length': fwr, 'row': 0, 'id': devid };
self.dmesg['BIOS'] = \
{'list': list, 'start': fws, 'end': fwr,
'row': 0, 'color': "purple", 'order': 4}
self.dmesg['resume_cpu']['order'] += 1
self.dmesg['resume_noirq']['order'] += 1
self.dmesg['resume_early']['order'] += 1
self.dmesg['resume_general']['order'] += 1
self.phases = self.sortedPhases()
def vprint(self, msg):
if(self.verbose):
print(msg)
def dmesgSortVal(self, phase):
return self.dmesg[phase]['order']
def sortedPhases(self):
return sorted(self.dmesg, key=self.dmesgSortVal)
def sortedDevices(self, phase):
list = self.dmesg[phase]['list']
slist = []
tmp = dict()
for devname in list:
dev = list[devname]
tmp[dev['start']] = devname
for t in sorted(tmp):
slist.append(tmp[t])
return slist
def fixupInitcalls(self, phase, end):
# if any calls never returned, clip them at system resume end
phaselist = self.dmesg[phase]['list']
for devname in phaselist:
dev = phaselist[devname]
if(dev['end'] < 0):
dev['end'] = end
self.vprint("%s (%s): callback didn't return" % (devname, phase))
def fixupInitcallsThatDidntReturn(self):
# if any calls never returned, clip them at system resume end
for phase in self.phases:
self.fixupInitcalls(phase, self.dmesg['resume_general']['end'])
if(phase == "resume_general"):
break
def newAction(self, phase, name, pid, parent, start, end):
self.id += 1
devid = "dc%d" % self.id
list = self.dmesg[phase]['list']
length = -1.0
if(start >= 0 and end >= 0):
length = end - start
list[name] = {'start': start, 'end': end, 'pid': pid, 'par': parent,
'length': length, 'row': 0, 'id': devid }
def deviceIDs(self, devlist, phase):
idlist = []
for p in self.phases:
if(p[0] != phase[0]):
continue
list = data.dmesg[p]['list']
for devname in list:
if devname in devlist:
idlist.append(list[devname]['id'])
return idlist
def deviceParentID(self, devname, phase):
pdev = ""
pdevid = ""
for p in self.phases:
if(p[0] != phase[0]):
continue
list = data.dmesg[p]['list']
if devname in list:
pdev = list[devname]['par']
for p in self.phases:
if(p[0] != phase[0]):
continue
list = data.dmesg[p]['list']
if pdev in list:
return list[pdev]['id']
return pdev
def deviceChildrenIDs(self, devname, phase):
devlist = []
for p in self.phases:
if(p[0] != phase[0]):
continue
list = data.dmesg[p]['list']
for child in list:
if(list[child]['par'] == devname):
devlist.append(child)
return self.deviceIDs(devlist, phase)
class FTraceLine:
time = 0.0
length = 0.0
fcall = False
freturn = False
fevent = False
depth = 0
name = ""
def __init__(self, t, m, d):
self.time = float(t)
# check to see if this is a trace event
em = re.match(r"^ *\/\* *(?P<msg>.*) \*\/ *$", m)
if(em):
self.name = em.group("msg")
self.fevent = True
return
# convert the duration to seconds
if(d):
self.length = float(d)/1000000
# the indentation determines the depth
match = re.match(r"^(?P<d> *)(?P<o>.*)$", m)
if(not match):
return
self.depth = self.getDepth(match.group('d'))
m = match.group('o')
# function return
if(m[0] == '}'):
self.freturn = True
if(len(m) > 1):
# includes comment with function name
match = re.match(r"^} *\/\* *(?P<n>.*) *\*\/$", m)
if(match):
self.name = match.group('n')
# function call
else:
self.fcall = True
# function call with children
if(m[-1] == '{'):
match = re.match(r"^(?P<n>.*) *\(.*", m)
if(match):
self.name = match.group('n')
# function call with no children (leaf)
elif(m[-1] == ';'):
self.freturn = True
match = re.match(r"^(?P<n>.*) *\(.*", m)
if(match):
self.name = match.group('n')
# something else (possibly a trace marker)
else:
self.name = m
def getDepth(self, str):
return len(str)/2
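# Illustrative example (not used by the tool): how one function_graph trace
# line maps onto FTraceLine fields. The sample values are made up but follow
# the format the regexes above expect.
def _ftraceline_example():
    line = FTraceLine("1234.567890", "  dpm_run_callback() {", "")
    # line.fcall is True, line.depth is 1 (two leading spaces),
    # and line.name is "dpm_run_callback"
    return line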
class FTraceCallGraph:
start = -1.0
end = -1.0
list = []
invalid = False
depth = 0
def __init__(self):
self.start = -1.0
self.end = -1.0
self.list = []
self.depth = 0
def setDepth(self, line):
if(line.fcall and not line.freturn):
line.depth = self.depth
self.depth += 1
elif(line.freturn and not line.fcall):
self.depth -= 1
line.depth = self.depth
else:
line.depth = self.depth
def addLine(self, line, match):
if(not self.invalid):
self.setDepth(line)
if(line.depth == 0 and line.freturn):
self.end = line.time
self.list.append(line)
return True
if(self.invalid):
return False
if(len(self.list) >= 1000000 or self.depth < 0):
first = self.list[0]
self.list = []
self.list.append(first)
self.invalid = True
id = "task %s cpu %s" % (match.group("pid"), match.group("cpu"))
window = "(%f - %f)" % (self.start, line.time)
data.vprint("Too much data for "+id+" "+window+", ignoring this callback")
return False
self.list.append(line)
if(self.start < 0):
self.start = line.time
return False
def sanityCheck(self):
stack = dict()
cnt = 0
for l in self.list:
if(l.fcall and not l.freturn):
stack[l.depth] = l
cnt += 1
elif(l.freturn and not l.fcall):
if(not stack[l.depth]):
return False
stack[l.depth].length = l.length
stack[l.depth] = 0
l.length = 0
cnt -= 1
if(cnt == 0):
return True
return False
def debugPrint(self, filename):
if(filename == "stdout"):
print("[%f - %f]") % (self.start, self.end)
for l in self.list:
if(l.freturn and l.fcall):
print("%f (%02d): %s(); (%.3f us)" % (l.time, l.depth, l.name, l.length*1000000))
elif(l.freturn):
print("%f (%02d): %s} (%.3f us)" % (l.time, l.depth, l.name, l.length*1000000))
else:
print("%f (%02d): %s() { (%.3f us)" % (l.time, l.depth, l.name, l.length*1000000))
print(" ")
else:
fp = open(filename, 'w')
print(filename)
for l in self.list:
if(l.freturn and l.fcall):
fp.write("%f (%02d): %s(); (%.3f us)\n" % (l.time, l.depth, l.name, l.length*1000000))
elif(l.freturn):
fp.write("%f (%02d): %s} (%.3f us)\n" % (l.time, l.depth, l.name, l.length*1000000))
else:
fp.write("%f (%02d): %s() { (%.3f us)\n" % (l.time, l.depth, l.name, l.length*1000000))
fp.close()
class Timeline:
html = {}
scaleH = 0.0 # height of the timescale row as a percent of the timeline height
rowH = 0.0 # height of each row in percent of the timeline height
row_height_pixels = 30
maxrows = 0
height = 0
def __init__(self):
self.html = {
'timeline': "",
'legend': "",
'scale': ""
}
def setRows(self, rows):
self.maxrows = int(rows)
self.scaleH = 100.0/float(self.maxrows)
self.height = self.maxrows*self.row_height_pixels
r = float(self.maxrows - 1)
if(r < 1.0):
r = 1.0
self.rowH = (100.0 - self.scaleH)/r
# -- global objects --
sysvals = SystemValues()
data = Data()
# -- functions --
# Function: initFtrace
# Description:
# Configure ftrace to capture a function trace during suspend/resume
def initFtrace():
global sysvals
print("INITIALIZING FTRACE...")
# turn trace off
os.system("echo 0 > "+sysvals.tpath+"tracing_on")
# set the trace clock to global
os.system("echo global > "+sysvals.tpath+"trace_clock")
# set trace buffer to a huge value
os.system("echo nop > "+sysvals.tpath+"current_tracer")
os.system("echo 100000 > "+sysvals.tpath+"buffer_size_kb")
# clear the trace buffer
os.system("echo \"\" > "+sysvals.tpath+"trace")
# set trace type
os.system("echo function_graph > "+sysvals.tpath+"current_tracer")
os.system("echo \"\" > "+sysvals.tpath+"set_ftrace_filter")
# set trace format options
os.system("echo funcgraph-abstime > "+sysvals.tpath+"trace_options")
os.system("echo funcgraph-proc > "+sysvals.tpath+"trace_options")
# focus only on device suspend and resume
os.system("cat "+sysvals.tpath+"available_filter_functions | grep dpm_run_callback > "+sysvals.tpath+"set_graph_function")
# Function: verifyFtrace
# Description:
# Check that ftrace is working on the system
def verifyFtrace():
global sysvals
files = ["available_filter_functions", "buffer_size_kb",
"current_tracer", "set_ftrace_filter",
"trace", "trace_marker"]
for f in files:
if(os.path.exists(sysvals.tpath+f) == False):
return False
return True
def parseStamp(line):
global data, sysvals
stampfmt = r"# suspend-(?P<m>[0-9]{2})(?P<d>[0-9]{2})(?P<y>[0-9]{2})-"+\
"(?P<H>[0-9]{2})(?P<M>[0-9]{2})(?P<S>[0-9]{2})"+\
" (?P<host>.*) (?P<mode>.*) (?P<kernel>.*)$"
m = re.match(stampfmt, line)
if(m):
dt = datetime.datetime(int(m.group("y"))+2000, int(m.group("m")),
int(m.group("d")), int(m.group("H")), int(m.group("M")),
int(m.group("S")))
data.stamp['time'] = dt.strftime("%B %d %Y, %I:%M:%S %p")
data.stamp['host'] = m.group("host")
data.stamp['mode'] = m.group("mode")
data.stamp['kernel'] = m.group("kernel")
sysvals.suspendmode = data.stamp['mode']
# Function: analyzeTraceLog
# Description:
# Analyse an ftrace log output file generated from this app during
# the execution phase. Create an "ftrace" structure in memory for
# subsequent formatting in the html output file
def analyzeTraceLog():
global sysvals, data
# the ftrace data is tied to the dmesg data
if(not data.usedmesg):
return
# read through the ftrace and parse the data
data.vprint("Analyzing the ftrace data...")
ftrace_line_fmt = r"^ *(?P<time>[0-9\.]*) *\| *(?P<cpu>[0-9]*)\)"+\
" *(?P<proc>.*)-(?P<pid>[0-9]*) *\|"+\
"[ +!]*(?P<dur>[0-9\.]*) .*\| (?P<msg>.*)"
ftemp = dict()
inthepipe = False
tf = open(sysvals.ftracefile, 'r')
count = 0
for line in tf:
count = count + 1
# grab the time stamp if it's valid
if(count == 1):
parseStamp(line)
continue
# parse only valid lines
m = re.match(ftrace_line_fmt, line)
if(not m):
continue
m_time = m.group("time")
m_pid = m.group("pid")
m_msg = m.group("msg")
m_dur = m.group("dur")
if(m_time and m_pid and m_msg):
t = FTraceLine(m_time, m_msg, m_dur)
pid = int(m_pid)
else:
continue
# the line should be a call, return, or event
if(not t.fcall and not t.freturn and not t.fevent):
continue
# only parse the ftrace data during suspend/resume
if(not inthepipe):
# look for the suspend start marker
if(t.fevent):
if(t.name == "SUSPEND START"):
data.vprint("SUSPEND START %f %s:%d" % (t.time, sysvals.ftracefile, count))
inthepipe = True
continue
else:
# look for the resume end marker
if(t.fevent):
if(t.name == "RESUME COMPLETE"):
data.vprint("RESUME COMPLETE %f %s:%d" % (t.time, sysvals.ftracefile, count))
inthepipe = False
break
continue
# create a callgraph object for the data
if(pid not in ftemp):
ftemp[pid] = FTraceCallGraph()
# when the call is finished, see which device matches it
if(ftemp[pid].addLine(t, m)):
if(not ftemp[pid].sanityCheck()):
id = "task %s cpu %s" % (pid, m.group("cpu"))
data.vprint("Sanity check failed for "+id+", ignoring this callback")
continue
callstart = ftemp[pid].start
callend = ftemp[pid].end
for p in data.phases:
if(data.dmesg[p]['start'] <= callstart and callstart <= data.dmesg[p]['end']):
list = data.dmesg[p]['list']
for devname in list:
dev = list[devname]
if(pid == dev['pid'] and callstart <= dev['start'] and callend >= dev['end']):
data.vprint("%15s [%f - %f] %s(%d)" % (p, callstart, callend, devname, pid))
dev['ftrace'] = ftemp[pid]
break
ftemp[pid] = FTraceCallGraph()
tf.close()
# Function: sortKernelLog
# Description:
# The dmesg output log sometimes comes with lines that have
# timestamps out of order. This could cause issues since a call
# could accidentally end up in the wrong phase
def sortKernelLog():
global sysvals, data
lf = open(sysvals.dmesgfile, 'r')
dmesglist = []
count = 0
for line in lf:
line = line.replace("\r\n", "")
if(count == 0):
parseStamp(line)
elif(count == 1):
m = re.match(r"# fwsuspend (?P<s>[0-9]*) fwresume (?P<r>[0-9]*)$", line)
if(m):
data.fwSuspend = int(m.group("s"))
data.fwResume = int(m.group("r"))
if(data.fwSuspend > 0 or data.fwResume > 0):
data.fwValid = True
if(re.match(r".*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)", line)):
dmesglist.append(line)
count += 1
lf.close()
last = ""
# fix lines with the same time stamp and function with the call and return swapped
for line in dmesglist:
mc = re.match(r".*(\[ *)(?P<t>[0-9\.]*)(\]) calling (?P<f>.*)\+ @ .*, parent: .*", line)
mr = re.match(r".*(\[ *)(?P<t>[0-9\.]*)(\]) call (?P<f>.*)\+ returned .* after (?P<dt>.*) usecs", last)
if(mc and mr and (mc.group("t") == mr.group("t")) and (mc.group("f") == mr.group("f"))):
i = dmesglist.index(last)
j = dmesglist.index(line)
dmesglist[i] = line
dmesglist[j] = last
last = line
return dmesglist
# Function: analyzeKernelLog
# Description:
# Analyse a dmesg log output file generated from this app during
# the execution phase. Create a set of device structures in memory
# for subsequent formatting in the html output file
def analyzeKernelLog():
global sysvals, data
print("PROCESSING DATA")
data.vprint("Analyzing the dmesg data...")
if(os.path.exists(sysvals.dmesgfile) == False):
print("ERROR: %s doesn't exist") % sysvals.dmesgfile
return False
lf = sortKernelLog()
phase = "suspend_runtime"
dm = {
'suspend_general': r"PM: Syncing filesystems.*",
'suspend_early': r"PM: suspend of devices complete after.*",
'suspend_noirq': r"PM: late suspend of devices complete after.*",
'suspend_cpu': r"PM: noirq suspend of devices complete after.*",
'resume_cpu': r"ACPI: Low-level resume complete.*",
'resume_noirq': r"ACPI: Waking up from system sleep state.*",
'resume_early': r"PM: noirq resume of devices complete after.*",
'resume_general': r"PM: early resume of devices complete after.*",
'resume_complete': r".*Restarting tasks \.\.\..*",
}
if(sysvals.suspendmode == "standby"):
dm['resume_cpu'] = r"PM: Restoring platform NVS memory"
elif(sysvals.suspendmode == "disk"):
dm['suspend_early'] = r"PM: freeze of devices complete after.*"
dm['suspend_noirq'] = r"PM: late freeze of devices complete after.*"
dm['suspend_cpu'] = r"PM: noirq freeze of devices complete after.*"
dm['resume_cpu'] = r"PM: Restoring platform NVS memory"
dm['resume_early'] = r"PM: noirq restore of devices complete after.*"
dm['resume_general'] = r"PM: early restore of devices complete after.*"
action_start = 0.0
for line in lf:
# -- preprocessing --
# parse each dmesg line into the time and message
m = re.match(r".*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)", line)
if(m):
ktime = float(m.group("ktime"))
msg = m.group("msg")
else:
print line
continue
# -- phase changes --
# suspend_general start
if(re.match(dm['suspend_general'], msg)):
phase = "suspend_general"
data.dmesg[phase]['start'] = ktime
data.start = ktime
# action start: syncing filesystems
action_start = ktime
# suspend_early start
elif(re.match(dm['suspend_early'], msg)):
data.dmesg["suspend_general"]['end'] = ktime
phase = "suspend_early"
data.dmesg[phase]['start'] = ktime
# suspend_noirq start
elif(re.match(dm['suspend_noirq'], msg)):
data.dmesg["suspend_early"]['end'] = ktime
phase = "suspend_noirq"
data.dmesg[phase]['start'] = ktime
# suspend_cpu start
elif(re.match(dm['suspend_cpu'], msg)):
data.dmesg["suspend_noirq"]['end'] = ktime
phase = "suspend_cpu"
data.dmesg[phase]['start'] = ktime
# resume_cpu start
elif(re.match(dm['resume_cpu'], msg)):
data.tSuspended = ktime
data.dmesg["suspend_cpu"]['end'] = ktime
phase = "resume_cpu"
data.dmesg[phase]['start'] = ktime
# resume_noirq start
elif(re.match(dm['resume_noirq'], msg)):
data.dmesg["resume_cpu"]['end'] = ktime
phase = "resume_noirq"
data.dmesg[phase]['start'] = ktime
# action end: ACPI resume
data.newAction("resume_cpu", "ACPI", -1, "", action_start, ktime)
# resume_early start
elif(re.match(dm['resume_early'], msg)):
data.dmesg["resume_noirq"]['end'] = ktime
phase = "resume_early"
data.dmesg[phase]['start'] = ktime
# resume_general start
elif(re.match(dm['resume_general'], msg)):
data.dmesg["resume_early"]['end'] = ktime
phase = "resume_general"
data.dmesg[phase]['start'] = ktime
# resume complete start
elif(re.match(dm['resume_complete'], msg)):
data.dmesg["resume_general"]['end'] = ktime
data.end = ktime
phase = "resume_runtime"
break
# -- device callbacks --
if(phase in data.phases):
# device init call
if(re.match(r"calling (?P<f>.*)\+ @ .*, parent: .*", msg)):
sm = re.match(r"calling (?P<f>.*)\+ @ (?P<n>.*), parent: (?P<p>.*)", msg);
f = sm.group("f")
n = sm.group("n")
p = sm.group("p")
if(f and n and p):
data.newAction(phase, f, int(n), p, ktime, -1)
# device init return
elif(re.match(r"call (?P<f>.*)\+ returned .* after (?P<t>.*) usecs", msg)):
sm = re.match(r"call (?P<f>.*)\+ returned .* after (?P<t>.*) usecs(?P<a>.*)", msg);
f = sm.group("f")
t = sm.group("t")
list = data.dmesg[phase]['list']
if(f in list):
dev = list[f]
dev['length'] = int(t)
dev['end'] = ktime
data.vprint("%15s [%f - %f] %s(%d) %s" %
(phase, dev['start'], dev['end'], f, dev['pid'], dev['par']))
# -- phase specific actions --
if(phase == "suspend_general"):
if(re.match(r"PM: Preparing system for mem sleep.*", msg)):
data.newAction(phase, "filesystem-sync", -1, "", action_start, ktime)
elif(re.match(r"Freezing user space processes .*", msg)):
action_start = ktime
elif(re.match(r"Freezing remaining freezable tasks.*", msg)):
data.newAction(phase, "freeze-user-processes", -1, "", action_start, ktime)
action_start = ktime
elif(re.match(r"PM: Entering (?P<mode>[a-z,A-Z]*) sleep.*", msg)):
data.newAction(phase, "freeze-tasks", -1, "", action_start, ktime)
elif(phase == "suspend_cpu"):
m = re.match(r"smpboot: CPU (?P<cpu>[0-9]*) is now offline", msg)
if(m):
cpu = "CPU"+m.group("cpu")
data.newAction(phase, cpu, -1, "", action_start, ktime)
action_start = ktime
elif(re.match(r"ACPI: Preparing to enter system sleep state.*", msg)):
action_start = ktime
elif(re.match(r"Disabling non-boot CPUs .*", msg)):
data.newAction(phase, "ACPI", -1, "", action_start, ktime)
action_start = ktime
elif(phase == "resume_cpu"):
m = re.match(r"CPU(?P<cpu>[0-9]*) is up", msg)
if(m):
cpu = "CPU"+m.group("cpu")
data.newAction(phase, cpu, -1, "", action_start, ktime)
action_start = ktime
elif(re.match(r"Enabling non-boot CPUs .*", msg)):
action_start = ktime
# fill in any missing phases
lp = "suspend_general"
for p in data.phases:
if(p == "suspend_general"):
continue
if(data.dmesg[p]['start'] < 0):
data.dmesg[p]['start'] = data.dmesg[lp]['end']
if(p == "resume_cpu"):
data.tSuspended = data.dmesg[lp]['end']
if(data.dmesg[p]['end'] < 0):
data.dmesg[p]['end'] = data.dmesg[p]['start']
lp = p
data.fixupInitcallsThatDidntReturn()
return True
# Function: setTimelineRows
# Description:
# Organize the device or thread lists into the smallest
# number of rows possible, with no entry overlapping
# Arguments:
# list: the list to sort (dmesg or ftrace)
# sortedkeys: sorted key list to use
def setTimelineRows(list, sortedkeys):
global data
# clear all rows and set them to undefined
remaining = len(list)
rowdata = dict()
row = 0
for item in list:
list[item]['row'] = -1
# try to pack each row with as many ranges as possible
while(remaining > 0):
if(row not in rowdata):
rowdata[row] = []
for item in sortedkeys:
if(list[item]['row'] < 0):
s = list[item]['start']
e = list[item]['end']
valid = True
for ritem in rowdata[row]:
rs = ritem['start']
re = ritem['end']
if(not (((s <= rs) and (e <= rs)) or ((s >= re) and (e >= re)))):
valid = False
break
if(valid):
rowdata[row].append(list[item])
list[item]['row'] = row
remaining -= 1
row += 1
return row
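# Worked example (illustrative only, not called by the tool): overlapping
# ranges are pushed to new rows, non-overlapping ones are packed onto row 0.
def _setTimelineRows_example():
    devs = {
        'devA': {'start': 0.0, 'end': 2.0, 'row': 0},
        'devB': {'start': 1.0, 'end': 3.0, 'row': 0},
        'devC': {'start': 2.5, 'end': 4.0, 'row': 0},
    }
    rows = setTimelineRows(devs, sorted(devs))
    # rows == 2: devA and devC share row 0, devB lands on row 1
    return rows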
# Function: createTimeScale
# Description:
# Create timescale lines for the dmesg and ftrace timelines
# Arguments:
# t0: start time (suspend begin)
# tMax: end time (resume end)
# tSuspend: time when suspend occurs
def createTimeScale(t0, tMax, tSuspended):
global data
timescale = "<div class=\"t\" style=\"right:{0}%\">{1}</div>\n"
output = '<div id="timescale">\n'
# set scale for timeline
tTotal = tMax - t0
tS = 0.1
if(tTotal <= 0):
return output
if(tTotal > 4):
tS = 1
if(tSuspended < 0):
for i in range(int(tTotal/tS)+1):
pos = "%0.3f" % (100 - ((float(i)*tS*100)/tTotal))
if(i > 0):
val = "%0.f" % (float(i)*tS*1000)
else:
val = ""
output += timescale.format(pos, val)
else:
tSuspend = tSuspended - t0
divTotal = int(tTotal/tS) + 1
divSuspend = int(tSuspend/tS)
s0 = (tSuspend - tS*divSuspend)*100/tTotal
for i in range(divTotal):
pos = "%0.3f" % (100 - ((float(i)*tS*100)/tTotal) - s0)
if((i == 0) and (s0 < 3)):
val = ""
elif(i == divSuspend):
val = "S/R"
else:
val = "%0.f" % (float(i-divSuspend)*tS*1000)
output += timescale.format(pos, val)
output += '</div>\n'
return output
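# For reference (worked numbers, not used by the code): the default tick is
# 0.1 s for windows up to 4 s and 1 s for longer windows, and when tSuspended
# is non-negative the tick at the suspend point is labelled "S/R".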
# Function: createHTML
# Description:
# Create the output html file.
def createHTML():
global sysvals, data
data.normalizeTime()
# html function templates
headline_stamp = '<div class="stamp">{0} {1} {2} {3}</div>\n'
html_zoombox = '<center><button id="zoomin">ZOOM IN</button><button id="zoomout">ZOOM OUT</button><button id="zoomdef">ZOOM 1:1</button></center>\n<div id="dmesgzoombox" class="zoombox">\n'
html_timeline = '<div id="{0}" class="timeline" style="height:{1}px">\n'
html_device = '<div id="{0}" title="{1}" class="thread" style="left:{2}%;top:{3}%;height:{4}%;width:{5}%;">{6}</div>\n'
html_phase = '<div class="phase" style="left:{0}%;width:{1}%;top:{2}%;height:{3}%;background-color:{4}">{5}</div>\n'
html_legend = '<div class="square" style="left:{0}%;background-color:{1}"> {2}</div>\n'
html_timetotal = '<table class="time1">\n<tr>'\
'<td class="gray">{2} Suspend Time: <b>{0} ms</b></td>'\
'<td class="gray">{2} Resume Time: <b>{1} ms</b></td>'\
'</tr>\n</table>\n'
html_timegroups = '<table class="time2">\n<tr>'\
'<td class="green">Kernel Suspend: {0} ms</td>'\
'<td class="purple">Firmware Suspend: {1} ms</td>'\
'<td class="purple">Firmware Resume: {2} ms</td>'\
'<td class="yellow">Kernel Resume: {3} ms</td>'\
'</tr>\n</table>\n'
# device timeline (dmesg)
if(data.usedmesg):
data.vprint("Creating Device Timeline...")
devtl = Timeline()
# Generate the header for this timeline
t0 = data.start
tMax = data.end
tTotal = tMax - t0
if(tTotal == 0):
print("ERROR: No timeline data")
sys.exit()
suspend_time = "%.0f"%(-data.start*1000)
resume_time = "%.0f"%(data.end*1000)
if data.fwValid:
devtl.html['timeline'] = html_timetotal.format(suspend_time, resume_time, "Total")
sktime = "%.3f"%((data.dmesg['suspend_cpu']['end'] - data.dmesg['suspend_general']['start'])*1000)
sftime = "%.3f"%(data.fwSuspend / 1000000.0)
rftime = "%.3f"%(data.fwResume / 1000000.0)
rktime = "%.3f"%((data.dmesg['resume_general']['end'] - data.dmesg['resume_cpu']['start'])*1000)
devtl.html['timeline'] += html_timegroups.format(sktime, sftime, rftime, rktime)
else:
devtl.html['timeline'] = html_timetotal.format(suspend_time, resume_time, "Kernel")
# determine the maximum number of rows we need to draw
timelinerows = 0
for phase in data.dmesg:
list = data.dmesg[phase]['list']
rows = setTimelineRows(list, list)
data.dmesg[phase]['row'] = rows
if(rows > timelinerows):
timelinerows = rows
# calculate the timeline height and create its bounding box
devtl.setRows(timelinerows + 1)
devtl.html['timeline'] += html_zoombox;
devtl.html['timeline'] += html_timeline.format("dmesg", devtl.height);
# draw the colored boxes for each of the phases
for b in data.dmesg:
phase = data.dmesg[b]
left = "%.3f" % (((phase['start']-data.start)*100)/tTotal)
width = "%.3f" % (((phase['end']-phase['start'])*100)/tTotal)
devtl.html['timeline'] += html_phase.format(left, width, "%.3f"%devtl.scaleH, "%.3f"%(100-devtl.scaleH), data.dmesg[b]['color'], "")
# draw the time scale, try to make the number of labels readable
devtl.html['scale'] = createTimeScale(t0, tMax, data.tSuspended)
devtl.html['timeline'] += devtl.html['scale']
for b in data.dmesg:
phaselist = data.dmesg[b]['list']
for d in phaselist:
name = d
if(d in data.altdevname):
name = data.altdevname[d]
dev = phaselist[d]
height = (100.0 - devtl.scaleH)/data.dmesg[b]['row']
top = "%.3f" % ((dev['row']*height) + devtl.scaleH)
left = "%.3f" % (((dev['start']-data.start)*100)/tTotal)
width = "%.3f" % (((dev['end']-dev['start'])*100)/tTotal)
len = " (%0.3f ms) " % ((dev['end']-dev['start'])*1000)
color = "rgba(204,204,204,0.5)"
devtl.html['timeline'] += html_device.format(dev['id'], name+len+b, left, top, "%.3f"%height, width, name)
# timeline is finished
devtl.html['timeline'] += "</div>\n</div>\n"
# draw a legend which describes the phases by color
devtl.html['legend'] = "<div class=\"legend\">\n"
pdelta = 100.0/data.phases.__len__()
pmargin = pdelta / 4.0
for phase in data.phases:
order = "%.2f" % ((data.dmesg[phase]['order'] * pdelta) + pmargin)
name = string.replace(phase, "_", " ")
devtl.html['legend'] += html_legend.format(order, data.dmesg[phase]['color'], name)
devtl.html['legend'] += "</div>\n"
hf = open(sysvals.htmlfile, 'w')
thread_height = 0
# write the html header first (html head, css code, everything up to the start of body)
html_header = "<!DOCTYPE html>\n<html>\n<head>\n\
<meta http-equiv=\"content-type\" content=\"text/html; charset=UTF-8\">\n\
<title>AnalyzeSuspend</title>\n\
<style type='text/css'>\n\
body {overflow-y: scroll;}\n\
.stamp {width: 100%;text-align:center;background-color:gray;line-height:30px;color:white;font: 25px Arial;}\n\
.callgraph {margin-top: 30px;box-shadow: 5px 5px 20px black;}\n\
.callgraph article * {padding-left: 28px;}\n\
h1 {color:black;font: bold 30px Times;}\n\
table {width:100%;}\n\
.gray {background-color:rgba(80,80,80,0.1);}\n\
.green {background-color:rgba(204,255,204,0.4);}\n\
.purple {background-color:rgba(128,0,128,0.2);}\n\
.yellow {background-color:rgba(255,255,204,0.4);}\n\
.time1 {font: 22px Arial;border:1px solid;}\n\
.time2 {font: 15px Arial;border-bottom:1px solid;border-left:1px solid;border-right:1px solid;}\n\
td {text-align: center;}\n\
.tdhl {color: red;}\n\
.hide {display: none;}\n\
.pf {display: none;}\n\
.pf:checked + label {background: url(\'data:image/svg+xml;utf,<?xml version=\"1.0\" standalone=\"no\"?><svg xmlns=\"http://www.w3.org/2000/svg\" height=\"18\" width=\"18\" version=\"1.1\"><circle cx=\"9\" cy=\"9\" r=\"8\" stroke=\"black\" stroke-width=\"1\" fill=\"white\"/><rect x=\"4\" y=\"8\" width=\"10\" height=\"2\" style=\"fill:black;stroke-width:0\"/><rect x=\"8\" y=\"4\" width=\"2\" height=\"10\" style=\"fill:black;stroke-width:0\"/></svg>\') no-repeat left center;}\n\
.pf:not(:checked) ~ label {background: url(\'data:image/svg+xml;utf,<?xml version=\"1.0\" standalone=\"no\"?><svg xmlns=\"http://www.w3.org/2000/svg\" height=\"18\" width=\"18\" version=\"1.1\"><circle cx=\"9\" cy=\"9\" r=\"8\" stroke=\"black\" stroke-width=\"1\" fill=\"white\"/><rect x=\"4\" y=\"8\" width=\"10\" height=\"2\" style=\"fill:black;stroke-width:0\"/></svg>\') no-repeat left center;}\n\
.pf:checked ~ *:not(:nth-child(2)) {display: none;}\n\
.zoombox {position: relative; width: 100%; overflow-x: scroll;}\n\
.timeline {position: relative; font-size: 14px;cursor: pointer;width: 100%; overflow: hidden; background-color:#dddddd;}\n\
.thread {position: absolute; height: "+"%.3f"%thread_height+"%; overflow: hidden; line-height: 30px; border:1px solid;text-align:center;white-space:nowrap;background-color:rgba(204,204,204,0.5);}\n\
.thread:hover {background-color:white;border:1px solid red;z-index:10;}\n\
.phase {position: absolute;overflow: hidden;border:0px;text-align:center;}\n\
.t {position: absolute; top: 0%; height: 100%; border-right:1px solid black;}\n\
.legend {position: relative; width: 100%; height: 40px; text-align: center;margin-bottom:20px}\n\
.legend .square {position:absolute;top:10px; width: 0px;height: 20px;border:1px solid;padding-left:20px;}\n\
button {height:40px;width:200px;margin-bottom:20px;margin-top:20px;font-size:24px;}\n\
</style>\n</head>\n<body>\n"
hf.write(html_header)
# write the test title and general info header
if(data.stamp['time'] != ""):
hf.write(headline_stamp.format(data.stamp['host'],
data.stamp['kernel'], data.stamp['mode'], data.stamp['time']))
# write the dmesg data (device timeline)
if(data.usedmesg):
hf.write(devtl.html['timeline'])
hf.write(devtl.html['legend'])
hf.write('<div id="devicedetail"></div>\n')
hf.write('<div id="devicetree"></div>\n')
# write the ftrace data (callgraph)
if(data.useftrace):
hf.write('<section id="callgraphs" class="callgraph">\n')
# write out the ftrace data converted to html
html_func_top = '<article id="{0}" class="atop" style="background-color:{1}">\n<input type="checkbox" class="pf" id="f{2}" checked/><label for="f{2}">{3} {4}</label>\n'
html_func_start = '<article>\n<input type="checkbox" class="pf" id="f{0}" checked/><label for="f{0}">{1} {2}</label>\n'
html_func_end = '</article>\n'
html_func_leaf = '<article>{0} {1}</article>\n'
num = 0
for p in data.phases:
list = data.dmesg[p]['list']
for devname in data.sortedDevices(p):
if('ftrace' not in list[devname]):
continue
name = devname
if(devname in data.altdevname):
name = data.altdevname[devname]
devid = list[devname]['id']
cg = list[devname]['ftrace']
flen = "(%.3f ms)" % ((cg.end - cg.start)*1000)
hf.write(html_func_top.format(devid, data.dmesg[p]['color'], num, name+" "+p, flen))
num += 1
for line in cg.list:
if(line.length < 0.000000001):
flen = ""
else:
flen = "(%.3f ms)" % (line.length*1000)
if(line.freturn and line.fcall):
hf.write(html_func_leaf.format(line.name, flen))
elif(line.freturn):
hf.write(html_func_end)
else:
hf.write(html_func_start.format(num, line.name, flen))
num += 1
hf.write(html_func_end)
hf.write("\n\n </section>\n")
# write the footer and close
addScriptCode(hf)
hf.write("</body>\n</html>\n")
hf.close()
return True
def addScriptCode(hf):
global data
t0 = (data.start - data.tSuspended) * 1000
tMax = (data.end - data.tSuspended) * 1000
# create an array in javascript memory with the device details
detail = ' var bounds = [%f,%f];\n' % (t0, tMax)
detail += ' var d = [];\n'
dfmt = ' d["%s"] = { n:"%s", p:"%s", c:[%s] };\n';
for p in data.dmesg:
list = data.dmesg[p]['list']
for d in list:
parent = data.deviceParentID(d, p)
idlist = data.deviceChildrenIDs(d, p)
idstr = ""
for i in idlist:
if(idstr == ""):
idstr += '"'+i+'"'
else:
idstr += ', '+'"'+i+'"'
detail += dfmt % (list[d]['id'], d, parent, idstr)
# add the code which will manipulate the data in the browser
script_code = \
'<script type="text/javascript">\n'+detail+\
' var filter = [];\n'\
' var table = [];\n'\
' function deviceParent(devid) {\n'\
' var devlist = [];\n'\
' if(filter.indexOf(devid) < 0) filter[filter.length] = devid;\n'\
' if(d[devid].p in d)\n'\
' devlist = deviceParent(d[devid].p);\n'\
' else if(d[devid].p != "")\n'\
' devlist = [d[devid].p];\n'\
' devlist[devlist.length] = d[devid].n;\n'\
' return devlist;\n'\
' }\n'\
' function deviceChildren(devid, column, row) {\n'\
' if(!(devid in d)) return;\n'\
' if(filter.indexOf(devid) < 0) filter[filter.length] = devid;\n'\
' var cell = {name: d[devid].n, span: 1};\n'\
' var span = 0;\n'\
' if(column >= table.length) table[column] = [];\n'\
' table[column][row] = cell;\n'\
' for(var i = 0; i < d[devid].c.length; i++) {\n'\
' var cid = d[devid].c[i];\n'\
' span += deviceChildren(cid, column+1, row+span);\n'\
' }\n'\
' if(span == 0) span = 1;\n'\
' table[column][row].span = span;\n'\
' return span;\n'\
' }\n'\
' function deviceTree(devid, resume) {\n'\
' var html = "<table border=1>";\n'\
' filter = [];\n'\
' table = [];\n'\
' plist = deviceParent(devid);\n'\
' var devidx = plist.length - 1;\n'\
' for(var i = 0; i < devidx; i++)\n'\
' table[i] = [{name: plist[i], span: 1}];\n'\
' deviceChildren(devid, devidx, 0);\n'\
' for(var i = 0; i < devidx; i++)\n'\
' table[i][0].span = table[devidx][0].span;\n'\
' for(var row = 0; row < table[0][0].span; row++) {\n'\
' html += "<tr>";\n'\
' for(var col = 0; col < table.length; col++)\n'\
' if(row in table[col]) {\n'\
' var cell = table[col][row];\n'\
' var args = "";\n'\
' if(cell.span > 1)\n'\
' args += " rowspan="+cell.span;\n'\
' if((col == devidx) && (row == 0))\n'\
' args += " class=tdhl";\n'\
' if(resume)\n'\
' html += "<td"+args+">"+cell.name+" →</td>";\n'\
' else\n'\
' html += "<td"+args+">← "+cell.name+"</td>";\n'\
' }\n'\
' html += "</tr>";\n'\
' }\n'\
' html += "</table>";\n'\
' return html;\n'\
' }\n'\
' function zoomTimeline() {\n'\
' var timescale = document.getElementById("timescale");\n'\
' var dmesg = document.getElementById("dmesg");\n'\
' var zoombox = document.getElementById("dmesgzoombox");\n'\
' var val = parseFloat(dmesg.style.width);\n'\
' var newval = 100;\n'\
' var sh = window.outerWidth / 2;\n'\
' if(this.id == "zoomin") {\n'\
' newval = val * 1.2;\n'\
' if(newval > 40000) newval = 40000;\n'\
' dmesg.style.width = newval+"%";\n'\
' zoombox.scrollLeft = ((zoombox.scrollLeft + sh) * newval / val) - sh;\n'\
' } else if (this.id == "zoomout") {\n'\
' newval = val / 1.2;\n'\
' if(newval < 100) newval = 100;\n'\
' dmesg.style.width = newval+"%";\n'\
' zoombox.scrollLeft = ((zoombox.scrollLeft + sh) * newval / val) - sh;\n'\
' } else {\n'\
' zoombox.scrollLeft = 0;\n'\
' dmesg.style.width = "100%";\n'\
' }\n'\
' var html = "";\n'\
' var t0 = bounds[0];\n'\
' var tMax = bounds[1];\n'\
' var tTotal = tMax - t0;\n'\
' var wTotal = tTotal * 100.0 / newval;\n'\
' for(var tS = 1000; (wTotal / tS) < 3; tS /= 10);\n'\
' if(tS < 1) tS = 1;\n'\
' for(var s = ((t0 / tS)|0) * tS; s < tMax; s += tS) {\n'\
' var pos = (tMax - s) * 100.0 / tTotal;\n'\
' var name = (s == 0)?"S/R":(s+"ms");\n'\
' html += \"<div class=\\\"t\\\" style=\\\"right:\"+pos+\"%\\\">\"+name+\"</div>\";\n'\
' }\n'\
' timescale.innerHTML = html;\n'\
' }\n'\
' function deviceDetail() {\n'\
' var devtitle = document.getElementById("devicedetail");\n'\
' devtitle.innerHTML = "<h1>"+this.title+"</h1>";\n'\
' var devtree = document.getElementById("devicetree");\n'\
' devtree.innerHTML = deviceTree(this.id, (this.title.indexOf("resume") >= 0));\n'\
' var cglist = document.getElementById("callgraphs");\n'\
' if(!cglist) return;\n'\
' var cg = cglist.getElementsByClassName("atop");\n'\
' for (var i = 0; i < cg.length; i++) {\n'\
' if(filter.indexOf(cg[i].id) >= 0) {\n'\
' cg[i].style.display = "block";\n'\
' } else {\n'\
' cg[i].style.display = "none";\n'\
' }\n'\
' }\n'\
' }\n'\
' window.addEventListener("load", function () {\n'\
' var dmesg = document.getElementById("dmesg");\n'\
' dmesg.style.width = "100%"\n'\
' document.getElementById("zoomin").onclick = zoomTimeline;\n'\
' document.getElementById("zoomout").onclick = zoomTimeline;\n'\
' document.getElementById("zoomdef").onclick = zoomTimeline;\n'\
' var dev = dmesg.getElementsByClassName("thread");\n'\
' for (var i = 0; i < dev.length; i++) {\n'\
' dev[i].onclick = deviceDetail;\n'\
' }\n'\
' zoomTimeline();\n'\
' });\n'\
'</script>\n'
hf.write(script_code);
# Function: executeSuspend
# Description:
# Execute system suspend through the sysfs interface
def executeSuspend():
global sysvals, data
detectUSB()
pf = open(sysvals.powerfile, 'w')
# clear the kernel ring buffer just as we start
os.system("dmesg -C")
# start ftrace
if(data.useftrace):
print("START TRACING")
os.system("echo 1 > "+sysvals.tpath+"tracing_on")
os.system("echo SUSPEND START > "+sysvals.tpath+"trace_marker")
# initiate suspend
if(sysvals.rtcwake):
print("SUSPEND START")
os.system("rtcwake -s 10 -m "+sysvals.suspendmode)
else:
print("SUSPEND START (press a key to resume)")
pf.write(sysvals.suspendmode)
# execution will pause here
pf.close()
# return from suspend
print("RESUME COMPLETE")
# stop ftrace
if(data.useftrace):
os.system("echo RESUME COMPLETE > "+sysvals.tpath+"trace_marker")
os.system("echo 0 > "+sysvals.tpath+"tracing_on")
print("CAPTURING FTRACE")
os.system("echo \""+sysvals.teststamp+"\" > "+sysvals.ftracefile)
os.system("cat "+sysvals.tpath+"trace >> "+sysvals.ftracefile)
# grab a copy of the dmesg output
print("CAPTURING DMESG")
os.system("echo \""+sysvals.teststamp+"\" > "+sysvals.dmesgfile)
os.system("dmesg -c >> "+sysvals.dmesgfile)
# Function: detectUSB
# Description:
# Detect all the USB hosts and devices currently connected
def detectUSB():
global sysvals, data
for dirname, dirnames, filenames in os.walk("/sys/devices"):
if(re.match(r".*/usb[0-9]*.*", dirname) and
"idVendor" in filenames and "idProduct" in filenames):
vid = os.popen("cat %s/idVendor 2>/dev/null" % dirname).read().replace('\n', '')
pid = os.popen("cat %s/idProduct 2>/dev/null" % dirname).read().replace('\n', '')
product = os.popen("cat %s/product 2>/dev/null" % dirname).read().replace('\n', '')
name = dirname.split('/')[-1]
if(len(product) > 0):
data.altdevname[name] = "%s [%s]" % (product, name)
else:
data.altdevname[name] = "%s:%s [%s]" % (vid, pid, name)
def getModes():
global sysvals
modes = ""
if(os.path.exists(sysvals.powerfile)):
fp = open(sysvals.powerfile, 'r')
modes = string.split(fp.read())
fp.close()
return modes
# Function: statusCheck
# Description:
# Verify that the requested command and options will work
def statusCheck(dryrun):
global sysvals, data
res = dict()
if(data.notestrun):
print("SUCCESS: The command should run!")
return
# check we have root access
check = "YES"
if(os.environ['USER'] != "root"):
if(not dryrun):
doError("root access is required", False)
check = "NO"
res[" have root access: "] = check
# check sysfs is mounted
check = "YES"
if(not os.path.exists(sysvals.powerfile)):
if(not dryrun):
doError("sysfs must be mounted", False)
check = "NO"
res[" is sysfs mounted: "] = check
# check target mode is a valid mode
check = "YES"
modes = getModes()
if(sysvals.suspendmode not in modes):
if(not dryrun):
doError("%s is not a value power mode" % sysvals.suspendmode, False)
check = "NO"
res[" is "+sysvals.suspendmode+" a power mode: "] = check
# check if ftrace is available
if(data.useftrace):
check = "YES"
if(not verifyFtrace()):
if(not dryrun):
doError("ftrace is not configured", False)
check = "NO"
res[" is ftrace usable: "] = check
	# check if rtcwake is installed
if(sysvals.rtcwake):
check = "YES"
version = os.popen("rtcwake -V 2>/dev/null").read()
if(not version.startswith("rtcwake")):
if(not dryrun):
doError("rtcwake is not installed", False)
check = "NO"
res[" is rtcwake usable: "] = check
if(dryrun):
status = True
print("Checking if system can run the current command:")
for r in res:
print("%s\t%s" % (r, res[r]))
if(res[r] != "YES"):
status = False
if(status):
print("SUCCESS: The command should run!")
else:
print("FAILURE: The command won't run!")
def printHelp():
global sysvals
modes = getModes()
print("")
print("AnalyzeSuspend")
print("Usage: sudo analyze_suspend.py <options>")
print("")
print("Description:")
print(" Initiates a system suspend/resume while capturing dmesg")
print(" and (optionally) ftrace data to analyze device timing")
print("")
print(" Generates output files in subdirectory: suspend-mmddyy-HHMMSS")
print(" HTML output: <hostname>_<mode>.html")
print(" raw dmesg output: <hostname>_<mode>_dmesg.txt")
print(" raw ftrace output (with -f): <hostname>_<mode>_ftrace.txt")
print("")
print("Options:")
print(" [general]")
print(" -h Print this help text")
print(" -verbose Print extra information during execution and analysis")
print(" -status Test to see if the system is enabled to run this tool")
print(" -modes List available suspend modes")
	print("  -m mode     Mode to initiate for suspend %s (default: %s)" % (modes, sysvals.suspendmode))
print(" -rtcwake Use rtcwake to autoresume after 10 seconds (default: disabled)")
print(" -f Use ftrace to create device callgraphs (default: disabled)")
print(" [re-analyze data from previous runs]")
print(" -dmesg dmesgfile Create HTML timeline from dmesg file")
print(" -ftrace ftracefile Create HTML callgraph from ftrace file")
print("")
return True
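# Function: doError
# Description:
#	 Print an error message, optionally show the help text, then exit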
def doError(msg, help):
print("ERROR: %s") % msg
if(help == True):
printHelp()
sys.exit()
# -- script main --
# loop through the command line arguments
cmd = ""
args = iter(sys.argv[1:])
for arg in args:
if(arg == "-m"):
try:
val = args.next()
except:
doError("No mode supplied", True)
sysvals.suspendmode = val
elif(arg == "-f"):
data.useftrace = True
elif(arg == "-modes"):
cmd = "modes"
elif(arg == "-status"):
cmd = "status"
elif(arg == "-verbose"):
data.verbose = True
elif(arg == "-rtcwake"):
sysvals.rtcwake = True
elif(arg == "-dmesg"):
try:
val = args.next()
except:
doError("No dmesg file supplied", True)
data.notestrun = True
data.usedmesg = True
sysvals.dmesgfile = val
elif(arg == "-ftrace"):
try:
val = args.next()
except:
doError("No ftrace file supplied", True)
data.notestrun = True
data.useftrace = True
sysvals.ftracefile = val
elif(arg == "-h"):
printHelp()
sys.exit()
else:
doError("Invalid argument: "+arg, True)
# just run a utility command and exit
if(cmd != ""):
if(cmd == "status"):
statusCheck(True)
elif(cmd == "modes"):
modes = getModes()
		print(modes)
sys.exit()
data.initialize()
# if instructed, re-analyze existing data files
if(data.notestrun):
sysvals.setOutputFile()
data.vprint("Output file: %s" % sysvals.htmlfile)
if(sysvals.dmesgfile != ""):
analyzeKernelLog()
if(sysvals.ftracefile != ""):
analyzeTraceLog()
createHTML()
sys.exit()
# verify that we can run a test
data.usedmesg = True
statusCheck(False)
# prepare for the test
if(data.useftrace):
initFtrace()
sysvals.initTestOutput()
data.vprint("Output files:\n %s" % sysvals.dmesgfile)
if(data.useftrace):
data.vprint(" %s" % sysvals.ftracefile)
data.vprint(" %s" % sysvals.htmlfile)
# execute the test
executeSuspend()
analyzeKernelLog()
if(data.useftrace):
analyzeTraceLog()
createHTML()
| gpl-2.0 |
ashemedai/ansible | lib/ansible/modules/cloud/amazon/elasticache_parameter_group.py | 28 | 13401 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: elasticache_parameter_group
short_description: Manage cache parameter groups in Amazon ElastiCache.
description:
  - Manage cache parameter groups in Amazon ElastiCache.
  - Returns information about the specified cache parameter group.
version_added: "2.3"
author: "Sloane Hertel (@s-hertel)"
options:
group_family:
description:
- The name of the cache parameter group family that the cache parameter group can be used with.
choices: ['memcached1.4', 'redis2.6', 'redis2.8', 'redis3.2']
required: yes
name:
description:
- A user-specified name for the cache parameter group.
required: yes
description:
description:
- A user-specified description for the cache parameter group.
state:
description:
- Idempotent actions that will create/modify, destroy, or reset a cache parameter group as needed.
choices: ['present', 'absent', 'reset']
required: true
values:
description:
- A user-specified list of parameters to reset or modify for the cache parameter group.
required: no
default: None
"""
EXAMPLES = """
# Note: None of these examples set aws_access_key, aws_secret_key, or region.
# It is assumed that their matching environment variables are set.
---
- hosts: localhost
connection: local
tasks:
- name: 'Create a test parameter group'
elasticache_parameter_group:
name: 'test-param-group'
group_family: 'redis3.2'
description: 'This is a cache parameter group'
state: 'present'
- name: 'Modify a test parameter group'
elasticache_parameter_group:
name: 'test-param-group'
values:
- ['activerehashing', 'yes']
- ['client-output-buffer-limit-normal-hard-limit', 4]
state: 'present'
- name: 'Reset all modifiable parameters for the test parameter group'
elasticache_parameter_group:
name: 'test-param-group'
state: reset
- name: 'Delete a test parameter group'
elasticache_parameter_group:
name: 'test-param-group'
state: 'absent'
"""
RETURN = """
elasticache:
description: cache parameter group information and response metadata
returned: always
type: dict
sample:
cache_parameter_group:
cache_parameter_group_family: redis3.2
cache_parameter_group_name: test-please-delete
description: "initial description"
response_metadata:
http_headers:
content-length: "562"
content-type: text/xml
date: "Mon, 06 Feb 2017 22:14:08 GMT"
x-amzn-requestid: 947291f9-ecb9-11e6-85bd-3baa4eca2cc1
http_status_code: 200
request_id: 947291f9-ecb9-11e6-85bd-3baa4eca2cc1
retry_attempts: 0
changed:
description: if the cache parameter group has changed
returned: always
type: bool
sample:
changed: true
"""
# import module snippets
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import boto3_conn, get_aws_connection_info, ec2_argument_spec, camel_dict_to_snake_dict
from ansible.module_utils.six import text_type
import traceback
try:
import boto3
import botocore
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
def create(module, conn, name, group_family, description):
""" Create ElastiCache parameter group. """
try:
response = conn.create_cache_parameter_group(CacheParameterGroupName=name, CacheParameterGroupFamily=group_family, Description=description)
changed = True
    except botocore.exceptions.ClientError as e:
module.fail_json(msg="Unable to create cache parameter group.", exception=traceback.format_exc())
return response, changed
def delete(module, conn, name):
""" Delete ElastiCache parameter group. """
try:
conn.delete_cache_parameter_group(CacheParameterGroupName=name)
response = {}
changed = True
    except botocore.exceptions.ClientError as e:
module.fail_json(msg="Unable to delete cache parameter group.", exception=traceback.format_exc())
return response, changed
def make_current_modifiable_param_dict(module, conn, name):
""" Gets the current state of the cache parameter group and creates a dict with the format: {ParameterName: [Allowed_Values, DataType, ParameterValue]}"""
current_info = get_info(conn, name)
if current_info is False:
module.fail_json(msg="Could not connect to the cache parameter group %s." % name)
parameters = current_info["Parameters"]
modifiable_params = {}
for param in parameters:
if param["IsModifiable"] and ("AllowedValues" and "ParameterValue") in param:
modifiable_params[param["ParameterName"]] = [param["AllowedValues"], param["DataType"], param["ParameterValue"]]
return modifiable_params
def check_valid_modification(module, values, modifiable_params):
""" Check if the parameters and values in values are valid. """
changed_with_update = False
for parameter in values:
new_value = values[parameter]
# check valid modifiable parameters
if parameter not in modifiable_params:
module.fail_json(msg="%s is not a modifiable parameter. Valid parameters to modify are: %s." % (parameter, modifiable_params.keys()))
# check allowed datatype for modified parameters
str_to_type = {"integer": int, "string": text_type}
if not isinstance(new_value, str_to_type[modifiable_params[parameter][1]]):
module.fail_json(msg="%s (type %s) is not an allowed value for the parameter %s. Expected a type %s." %
(new_value, type(new_value), parameter, modifiable_params[parameter][1]))
# check allowed values for modifiable parameters
if text_type(new_value) not in modifiable_params[parameter][0] and not isinstance(new_value, int):
module.fail_json(msg="%s is not an allowed value for the parameter %s. Valid parameters are: %s." %
(new_value, parameter, modifiable_params[parameter][0]))
# check if a new value is different from current value
if text_type(new_value) != modifiable_params[parameter][2]:
changed_with_update = True
return changed_with_update
def check_changed_parameter_values(values, old_parameters, new_parameters):
""" Checking if the new values are different than the old values. """
changed_with_update = False
# if the user specified parameters to reset, only check those for change
if values:
for parameter in values:
if old_parameters[parameter] != new_parameters[parameter]:
changed_with_update = True
break
# otherwise check all to find a change
else:
for parameter in old_parameters:
if old_parameters[parameter] != new_parameters[parameter]:
changed_with_update = True
break
return changed_with_update
def modify(module, conn, name, values):
""" Modify ElastiCache parameter group to reflect the new information if it differs from the current. """
    # compare the group's current parameters with the values we've specified to see if this will change the group
format_parameters = []
for key in values:
value = text_type(values[key])
format_parameters.append({'ParameterName': key, 'ParameterValue': value})
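    # format_parameters now holds the ParameterNameValues payload, e.g. (illustrative):
    #   [{'ParameterName': 'activerehashing', 'ParameterValue': 'yes'}]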
try:
response = conn.modify_cache_parameter_group(CacheParameterGroupName=name, ParameterNameValues=format_parameters)
    except botocore.exceptions.ClientError as e:
module.fail_json(msg="Unable to modify cache parameter group.", exception=traceback.format_exc())
return response
def reset(module, conn, name, values):
""" Reset ElastiCache parameter group if the current information is different from the new information. """
# used to compare with the reset parameters' dict to see if there have been changes
old_parameters_dict = make_current_modifiable_param_dict(module, conn, name)
format_parameters = []
# determine whether to reset all or specific parameters
if values:
all_parameters = False
format_parameters = []
for key in values:
value = text_type(values[key])
format_parameters.append({'ParameterName': key, 'ParameterValue': value})
else:
all_parameters = True
try:
response = conn.reset_cache_parameter_group(CacheParameterGroupName=name, ParameterNameValues=format_parameters, ResetAllParameters=all_parameters)
    except botocore.exceptions.ClientError as e:
module.fail_json(msg="Unable to reset cache parameter group.", exception=traceback.format_exc())
# determine changed
new_parameters_dict = make_current_modifiable_param_dict(module, conn, name)
changed = check_changed_parameter_values(values, old_parameters_dict, new_parameters_dict)
return response, changed
def get_info(conn, name):
""" Gets info about the ElastiCache parameter group. Returns false if it doesn't exist or we don't have access. """
try:
data = conn.describe_cache_parameters(CacheParameterGroupName=name)
return data
except botocore.exceptions.ClientError as e:
return False
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
group_family=dict(type='str', choices=['memcached1.4', 'redis2.6', 'redis2.8', 'redis3.2']),
name=dict(required=True, type='str'),
description=dict(type='str'),
state=dict(required=True),
values=dict(type='dict'),
)
)
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO3:
        module.fail_json(msg='boto3 required for this module')
parameter_group_family = module.params.get('group_family')
parameter_group_name = module.params.get('name')
group_description = module.params.get('description')
state = module.params.get('state')
values = module.params.get('values')
# Retrieve any AWS settings from the environment.
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
if not region:
module.fail_json(msg="Either region or AWS_REGION or EC2_REGION environment variable or boto config aws_region or ec2_region must be set.")
connection = boto3_conn(module, conn_type='client',
resource='elasticache', region=region,
endpoint=ec2_url, **aws_connect_kwargs)
exists = get_info(connection, parameter_group_name)
# check that the needed requirements are available
    if state == 'present' and not exists and not (parameter_group_family and group_description):
        module.fail_json(msg="Creating a group requires a group family and a description.")
elif state == 'reset' and not exists:
module.fail_json(msg="No group %s to reset. Please create the group before using the state 'reset'." % parameter_group_name)
# Taking action
changed = False
if state == 'present':
if exists:
# confirm that the group exists without any actions
if not values:
response = exists
changed = False
# modify existing group
else:
modifiable_params = make_current_modifiable_param_dict(module, connection, parameter_group_name)
changed = check_valid_modification(module, values, modifiable_params)
response = modify(module, connection, parameter_group_name, values)
# create group
else:
response, changed = create(module, connection, parameter_group_name, parameter_group_family, group_description)
if values:
modifiable_params = make_current_modifiable_param_dict(module, connection, parameter_group_name)
changed = check_valid_modification(module, values, modifiable_params)
response = modify(module, connection, parameter_group_name, values)
elif state == 'absent':
if exists:
# delete group
response, changed = delete(module, connection, parameter_group_name)
else:
response = {}
changed = False
elif state == 'reset':
response, changed = reset(module, connection, parameter_group_name, values)
facts_result = dict(changed=changed, elasticache=camel_dict_to_snake_dict(response))
module.exit_json(**facts_result)
if __name__ == '__main__':
main()
| gpl-3.0 |
gnieboer/tensorflow | tensorflow/contrib/learn/python/learn/datasets/text_datasets.py | 124 | 2703 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Text datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tarfile
import numpy as np
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.python.platform import gfile
DBPEDIA_URL = 'https://github.com/le-scientifique/torchDatasets/raw/master/dbpedia_csv.tar.gz'
def maybe_download_dbpedia(data_dir):
"""Download if DBpedia data is not present."""
train_path = os.path.join(data_dir, 'dbpedia_csv/train.csv')
test_path = os.path.join(data_dir, 'dbpedia_csv/test.csv')
if not (gfile.Exists(train_path) and gfile.Exists(test_path)):
archive_path = base.maybe_download(
'dbpedia_csv.tar.gz', data_dir, DBPEDIA_URL)
tfile = tarfile.open(archive_path, 'r:*')
tfile.extractall(data_dir)
def load_dbpedia(size='small', test_with_fake_data=False):
"""Get DBpedia datasets from CSV files."""
if not test_with_fake_data:
data_dir = os.path.join(os.getenv('TF_EXP_BASE_DIR', ''), 'dbpedia_data')
maybe_download_dbpedia(data_dir)
train_path = os.path.join(data_dir, 'dbpedia_csv', 'train.csv')
test_path = os.path.join(data_dir, 'dbpedia_csv', 'test.csv')
if size == 'small':
# Reduce the size of original data by a factor of 1000.
base.shrink_csv(train_path, 1000)
base.shrink_csv(test_path, 1000)
train_path = train_path.replace('train.csv', 'train_small.csv')
test_path = test_path.replace('test.csv', 'test_small.csv')
else:
module_path = os.path.dirname(__file__)
train_path = os.path.join(module_path, 'data', 'text_train.csv')
test_path = os.path.join(module_path, 'data', 'text_test.csv')
train = base.load_csv_without_header(
train_path, target_dtype=np.int32, features_dtype=np.str, target_column=0)
test = base.load_csv_without_header(
test_path, target_dtype=np.int32, features_dtype=np.str, target_column=0)
return base.Datasets(train=train, validation=None, test=test)
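# Example usage (a sketch; the first call downloads the archive, so it needs network access):
#   dbpedia = load_dbpedia(size='small')
#   x_train, y_train = dbpedia.train.data, dbpedia.train.target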
| apache-2.0 |
gallifrey17/eden | modules/s3db/project.py | 3 | 374876 | # -*- coding: utf-8 -*-
""" Sahana Eden Project Model
@copyright: 2011-2015 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from __future__ import division
__all__ = ("S3ProjectModel",
"S3ProjectActivityModel",
"S3ProjectActivityTypeModel",
"S3ProjectActivityOrganisationModel",
"S3ProjectActivitySectorModel",
"S3ProjectAnnualBudgetModel",
"S3ProjectBeneficiaryModel",
"S3ProjectCampaignModel",
"S3ProjectFrameworkModel",
"S3ProjectHazardModel",
"S3ProjectHRModel",
#"S3ProjectIndicatorModel",
"S3ProjectLocationModel",
"S3ProjectOrganisationModel",
"S3ProjectPlanningModel",
"S3ProjectProgrammeModel",
"S3ProjectSectorModel",
"S3ProjectStatusModel",
"S3ProjectThemeModel",
"S3ProjectDRRModel",
"S3ProjectDRRPPModel",
"S3ProjectTaskModel",
"S3ProjectTaskHRMModel",
"S3ProjectTaskIReportModel",
"project_ActivityRepresent",
"project_activity_year_options",
"project_ckeditor",
"project_Details",
"project_rheader",
"project_task_controller",
"project_theme_help_fields",
"project_hazard_help_fields",
"project_hfa_opts",
"project_jnap_opts",
"project_pifacc_opts",
"project_rfa_opts",
"project_project_filters",
"project_project_list_layout",
"project_task_list_layout",
"project_TaskRepresent",
)
import datetime
try:
import json # try stdlib (Python 2.6)
except ImportError:
try:
import simplejson as json # try external module
except:
import gluon.contrib.simplejson as json # fallback to pure-Python module
try:
# Python 2.7
from collections import OrderedDict
except:
# Python 2.6
from gluon.contrib.simplejson.ordered_dict import OrderedDict
from gluon import *
from gluon.storage import Storage
from s3dal import Row
from ..s3 import *
from s3layouts import S3AddResourceLink
# Compact JSON encoding
SEPARATORS = (",", ":")
# =============================================================================
class S3ProjectModel(S3Model):
"""
Project Model
Note: This module can be extended by 2 different modes:
- '3w': "Who's doing What Where"
suitable for use by multinational organisations tracking
projects at a high level
- sub-mode 'drr': Disaster Risk Reduction extensions
            - 'task': Suitable for use by a smaller organisation tracking tasks
within projects
There are also a number of other deployment_settings to control behaviour
This class contains the tables common to all uses
There are additional tables in other Models
"""
names = ("project_project",
"project_project_id",
"project_project_represent",
)
def model(self):
T = current.T
db = current.db
auth = current.auth
NONE = current.messages["NONE"]
settings = current.deployment_settings
mode_3w = settings.get_project_mode_3w()
mode_task = settings.get_project_mode_task()
mode_drr = settings.get_project_mode_drr()
budget_monitoring = settings.get_project_budget_monitoring()
multi_budgets = settings.get_project_multiple_budgets()
multi_orgs = settings.get_project_multiple_organisations()
programmes = settings.get_project_programmes()
use_codes = settings.get_project_codes()
use_indicators = settings.get_project_indicators()
use_sectors = settings.get_project_sectors()
add_components = self.add_components
configure = self.configure
crud_strings = current.response.s3.crud_strings
define_table = self.define_table
set_method = self.set_method
super_link = self.super_link
# ---------------------------------------------------------------------
# Projects
#
LEAD_ROLE = settings.get_project_organisation_lead_role()
org_label = settings.get_project_organisation_roles()[LEAD_ROLE]
tablename = "project_project"
define_table(tablename,
super_link("doc_id", "doc_entity"),
super_link("budget_entity_id", "budget_entity"),
# multi_orgs deployments use the separate project_organisation table
# - although Lead Org is still cached here to avoid the need for a virtual field to lookup
self.org_organisation_id(
default = auth.root_org(),
label = org_label,
requires = self.org_organisation_requires(
required = True,
# Only allowed to add Projects for Orgs
# that the user has write access to
updateable = True,
),
),
Field("name", unique=True, length=255,
label = T("Project Name"),
# Require unique=True if using IS_NOT_ONE_OF like here (same table,
# no filter) in order to allow both automatic indexing (faster)
# and key-based de-duplication (i.e. before field validation)
requires = [IS_NOT_EMPTY(error_message=T("Please fill this!")),
IS_NOT_ONE_OF(db, "project_project.name")]
),
Field("code", length=128,
label = T("Short Title / ID"),
readable = use_codes,
writable = use_codes,
),
Field("description", "text",
label = T("Description"),
),
self.project_status_id(),
# NB There is additional client-side validation for start/end date in the Controller
s3_date("start_date",
label = T("Start Date"),
set_min = "#project_project_end_date",
),
s3_date("end_date",
label = T("End Date"),
set_max = "#project_project_start_date",
start_field = "project_project_start_date",
default_interval = 12,
),
# Free-text field with no validation (used by OCHA template currently)
Field("duration",
label = T("Duration"),
readable = False,
writable = False,
),
Field("calendar",
label = T("Calendar"),
readable = mode_task,
writable = mode_task,
requires = IS_EMPTY_OR(IS_URL()),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Calendar"),
T("URL to a Google Calendar to display on the project timeline."))),
),
# multi_budgets deployments handle on the Budgets Tab
# buget_monitoring deployments handle as inline component
Field("budget", "double",
label = T("Budget"),
represent = lambda v: \
IS_FLOAT_AMOUNT.represent(v, precision=2),
readable = False if (multi_budgets or budget_monitoring) else True,
writable = False if (multi_budgets or budget_monitoring) else True,
),
s3_currency(readable = False if (multi_budgets or budget_monitoring) else True,
writable = False if (multi_budgets or budget_monitoring) else True,
),
Field("objectives", "text",
label = T("Objectives"),
represent = lambda v: v or NONE,
readable = mode_3w,
writable = mode_3w,
),
self.hrm_human_resource_id(label = T("Contact Person"),
),
Field("current_status_by_indicators", "float",
default = 0.0,
label = T("Current Indicators Status"),
represent = project_status_represent,
readable = use_indicators,
writable = False,
),
Field("overall_status_by_indicators", "float",
default = 0.0,
label = T("Overall Indicators Status"),
represent = project_status_represent,
readable = use_indicators,
writable = False,
),
#Field("current_status_by_budget", "float",
# label = T("Current Budget Status"),
# represent = project_status_represent,
# readable = use_indicators,
# writable = False,
# ),
#Field("overall_status_by_budget", "float",
# label = T("Overall Budget Status"),
# represent = project_status_represent,
# readable = use_indicators,
# writable = False,
# ),
Field.Method("total_annual_budget",
self.project_total_annual_budget),
Field.Method("total_organisation_amount",
self.project_total_organisation_amount),
s3_comments(comment=DIV(_class="tooltip",
_title="%s|%s" % (T("Comments"),
T("Outcomes, Impact, Challenges"))),
),
*s3_meta_fields())
# CRUD Strings
ADD_PROJECT = T("Create Project")
crud_strings[tablename] = Storage(
label_create = ADD_PROJECT,
title_display = T("Project Details"),
title_list = T("Projects"),
title_update = T("Edit Project"),
title_report = T("Project Report"),
title_upload = T("Import Projects"),
label_list_button = T("List Projects"),
label_delete_button = T("Delete Project"),
msg_record_created = T("Project added"),
msg_record_modified = T("Project updated"),
msg_record_deleted = T("Project deleted"),
msg_list_empty = T("No Projects currently registered"))
# Filter widgets
filter_widgets = project_project_filters(org_label=org_label)
# Resource Configuration
if settings.get_project_theme_percentages():
create_next = URL(c="project", f="project",
args=["[id]", "theme"])
elif mode_task:
if settings.get_project_milestones():
create_next = URL(c="project", f="project",
args=["[id]", "milestone"])
else:
create_next = URL(c="project", f="project",
args=["[id]", "task"])
else:
# Default
create_next = None
list_fields = ["id"]
append = list_fields.append
if use_codes:
append("code")
append("name")
append("organisation_id")
if mode_3w:
append((T("Locations"), "location.location_id"))
if programmes:
append((T("Program"), "programme.name"))
if use_sectors:
append((T("Sectors"), "sector.name"))
if mode_drr:
append((T("Hazards"), "hazard.name"))
#append("drr.hfa")
append((T("Themes"), "theme.name"))
if multi_orgs:
append((T("Total Funding Amount"), "total_organisation_amount"))
if budget_monitoring:
append((T("Total Budget"), "budget.total_budget"))
elif multi_budgets:
append((T("Total Annual Budget"), "total_annual_budget"))
else:
append((T("Total Budget"), "budget"))
list_fields += ["start_date",
"end_date",
"location.location_id",
]
report_fields = list_fields
report_col_default = "location.location_id"
report_fact_fields = [(field, "count") for field in report_fields]
report_fact_default = "count(organisation_id)"
#report_fact_default = "count(theme.name)"
configure(tablename,
context = {"location": "location.location_id",
"organisation": "organisation_id",
},
create_next = create_next,
deduplicate = self.project_project_deduplicate,
filter_widgets = filter_widgets,
list_fields = list_fields,
list_layout = project_project_list_layout,
onaccept = self.project_project_onaccept,
realm_components = ("human_resource",
"task",
"organisation",
"activity",
"activity_type",
"annual_budget",
"beneficiary",
"location",
"milestone",
"theme_percentage",
"document",
"image",
),
report_options = Storage(
rows=report_fields,
cols=report_fields,
fact=report_fact_fields,
defaults=Storage(
rows="hazard.name",
cols=report_col_default,
fact=report_fact_default,
totals=True
)
),
super_entity = ("doc_entity", "budget_entity"),
update_realm = True,
)
# Reusable Field
if use_codes:
project_represent = S3Represent(lookup=tablename,
field_sep = ": ",
fields=["code", "name"])
else:
project_represent = S3Represent(lookup=tablename)
project_id = S3ReusableField("project_id", "reference %s" % tablename,
label = T("Project"),
ondelete = "CASCADE",
represent = project_represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "project_project.id",
project_represent,
updateable = True,
)
),
sortby = "name",
comment = S3AddResourceLink(c="project", f="project",
tooltip=T("If you don't see the project in the list, you can add a new one by clicking link 'Create Project'.")),
)
# Custom Methods
set_method("project", "project",
method = "assign",
action = self.hrm_AssignMethod(component="human_resource"))
set_method("project", "project",
method = "details",
action = project_Details)
set_method("project", "project",
method = "map",
action = self.project_map)
set_method("project", "project",
method = "timeline",
action = self.project_timeline)
# Components
add_components(tablename,
# Sites
#project_site = "project_id",
# Activities
project_activity = "project_id",
# Activity Types
project_activity_type = {"link": "project_activity_type_project",
"joinby": "project_id",
"key": "activity_type_id",
"actuate": "link",
},
# Goals
project_goal = "project_id",
# Indicators
project_indicator = "project_id",
project_indicator_data = "project_id",
#project_indicator_data = "project_id",
# Milestones
project_milestone = "project_id",
# Outcomes
project_outcome = "project_id",
# Outputs
project_output = "project_id",
# Tasks
project_task = {"link": "project_task_project",
"joinby": "project_id",
"key": "task_id",
"actuate": "replace",
"autocomplete": "name",
"autodelete": False,
},
# Annual Budgets
project_annual_budget = "project_id",
# Beneficiaries
project_beneficiary = "project_id",
# Hazards
project_hazard = {"link": "project_hazard_project",
"joinby": "project_id",
"key": "hazard_id",
"actuate": "hide",
},
# Human Resources
project_human_resource = "project_id",
hrm_human_resource = {"link": "project_human_resource",
"joinby": "project_id",
"key": "human_resource_id",
"actuate": "hide",
},
# Locations
project_location = "project_id",
# Sectors
org_sector = {"link": "project_sector_project",
"joinby": "project_id",
"key": "sector_id",
"actuate": "hide",
},
# Format needed by S3Filter
project_sector_project = ("project_id",
{"joinby": "project_id",
"multiple": False,
},
),
# Themes
project_theme = {"link": "project_theme_project",
"joinby": "project_id",
"key": "theme_id",
"actuate": "hide",
},
# Programmes
project_programme = {"link": "project_programme_project",
"joinby": "project_id",
"key": "programme_id",
"actuate": "hide",
"multiple": False,
},
# Format needed by S3Filter
project_theme_project = "project_id",
)
if multi_orgs:
add_components(tablename,
project_organisation = (# Organisations
"project_id",
# Donors
{"name": "donor",
"joinby": "project_id",
"filterby": "role",
# Works for IFRC & DRRPP:
"filterfor": (3,),
},
# Partners
{"name": "partner",
"joinby": "project_id",
"filterby": "role",
# Works for IFRC & DRRPP:
"filterfor": (2, 9),
},
),
)
# DRR
if mode_drr:
add_components(tablename,
project_drr = {"joinby": "project_id",
"multiple": False,
},
)
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return dict(project_project_id = project_id,
project_project_represent = project_represent,
)
# -------------------------------------------------------------------------
@staticmethod
def defaults():
""" Safe defaults for model-global names if module is disabled """
dummy = S3ReusableField("dummy_id", "integer",
readable = False,
writable = False)
return dict(project_project_id = lambda **attr: dummy("project_id"),
)
# -------------------------------------------------------------------------
@staticmethod
def project_current_indicator_status(row):
"""
Summary of Current Indicator Status
@ToDo: Make this configurable
"""
if hasattr(row, "project_project"):
row = row.project_project
if hasattr(row, "id"):
project_id = row["id"]
else:
return current.messages["NONE"]
table = current.s3db.project_indicator_data
query = (table.deleted != True) & \
(table.project_id == project_id)
rows = current.db(query).select(table.indicator_id,
table.end_date,
table.target_value, # Needed for Field.Method() to avoid extra DB call
table.value, # Needed for Field.Method() to avoid extra DB call
)
indicators = {}
for row in rows:
indicator_id = row.indicator_id
if indicator_id in indicators:
old_date = indicators[indicator_id]["date"]
new_date = row.end_date
                if datetime.datetime(new_date.year, new_date.month, new_date.day) > datetime.datetime(old_date.year, old_date.month, old_date.day):
# This is more current so replace with this
indicators[indicator_id].update(date=new_date,
percentage=row.percentage())
else:
indicators[indicator_id] = {"date": row.end_date,
"percentage": row.percentage(),
}
len_indicators = len(indicators)
if not len_indicators:
# Can't divide by Zero
return 0
NONE = current.messages["NONE"]
percentages = 0
for indicator_id in indicators:
percentage = indicators[indicator_id]["percentage"]
if percentage != NONE:
percentages += float(percentage[:-1])
return percentages / len_indicators
# -------------------------------------------------------------------------
@staticmethod
def project_total_annual_budget(row):
""" Total of all annual budgets for project """
if not current.deployment_settings.get_project_multiple_budgets():
return 0
if "project_project" in row:
project_id = row["project_project.id"]
elif "id" in row:
project_id = row["id"]
else:
return 0
table = current.s3db.project_annual_budget
query = (table.deleted != True) & \
(table.project_id == project_id)
sum_field = table.amount.sum()
return current.db(query).select(sum_field).first()[sum_field] or \
current.messages["NONE"]
# -------------------------------------------------------------------------
@staticmethod
def project_total_organisation_amount(row):
""" Total of project_organisation amounts for project """
if not current.deployment_settings.get_project_multiple_organisations():
return 0
if "project_project" in row:
project_id = row["project_project.id"]
elif "id" in row:
project_id = row["id"]
else:
return 0
table = current.s3db.project_organisation
query = (table.deleted != True) & \
(table.project_id == project_id)
sum_field = table.amount.sum()
return current.db(query).select(sum_field).first()[sum_field]
# -------------------------------------------------------------------------
@staticmethod
def project_project_onaccept(form):
"""
After DB I/O tasks for Project records
"""
settings = current.deployment_settings
if settings.get_project_multiple_organisations():
# Create/update project_organisation record from the organisation_id
# (Not in form.vars if added via component tab)
form_vars = form.vars
id = form_vars.id
organisation_id = form_vars.organisation_id or \
current.request.post_vars.organisation_id
if organisation_id:
lead_role = settings.get_project_organisation_lead_role()
otable = current.s3db.project_organisation
query = (otable.project_id == id) & \
(otable.role == lead_role)
# Update the lead organisation
count = current.db(query).update(organisation_id = organisation_id)
if not count:
# If there is no record to update, then create a new one
otable.insert(project_id = id,
organisation_id = organisation_id,
role = lead_role,
)
# -------------------------------------------------------------------------
@staticmethod
def project_project_deduplicate(item):
""" Import item de-duplication """
data = item.data
# If we have a code, then assume this is unique, however the same
# project name may be used in multiple locations
code = data.get("code")
if code:
table = item.table
query = (table.code.lower() == code.lower())
else:
name = data.get("name")
if name:
table = item.table
query = (table.name.lower() == name.lower())
else:
# Nothing we can work with
return
duplicate = current.db(query).select(table.id,
limitby=(0, 1)).first()
if duplicate:
item.id = duplicate.id
item.method = item.METHOD.UPDATE
# -------------------------------------------------------------------------
@staticmethod
def project_map(r, **attr):
"""
Display a filterable set of Projects on a Map
- assumes mode_3w
- currently assumes that theme_percentages=True
@ToDo: Browse by Year
"""
if r.representation == "html" and \
r.name == "project":
T = current.T
db = current.db
s3db = current.s3db
response = current.response
ptable = s3db.project_project
ttable = s3db.project_theme
tptable = s3db.project_theme_project
ltable = s3db.gis_location
# Search Widget
themes_dropdown = SELECT(_multiple=True,
_id="project_theme_id",
_style="height:80px")
append = themes_dropdown.append
table = current.s3db.project_theme
themes = current.db(table.deleted == False).select(table.id,
table.name,
orderby=table.name)
for theme in themes:
append(OPTION(theme.name,
_value=theme.id,
_selected="selected"))
form = FORM(themes_dropdown)
# Map
# The Layer of Projects to show on the Map
# @ToDo: Create a URL to the project_polygons custom method & use that
# @ToDo: Pass through attributes that we don't need for the 1st level of mapping
# so that they can be used without a screen refresh
url = URL(f="location", extension="geojson")
layer = {"name" : T("Projects"),
"id" : "projects",
"tablename" : "project_location",
"url" : url,
"active" : True,
#"marker" : None,
}
map = current.gis.show_map(collapsed = True,
feature_resources = [layer],
)
output = dict(title = T("Projects Map"),
form = form,
map = map,
)
# Add Static JS
response.s3.scripts.append(URL(c="static",
f="scripts",
args=["S3", "s3.project_map.js"]))
response.view = "map.html"
return output
else:
raise HTTP(501, current.ERROR.BAD_METHOD)
# -------------------------------------------------------------------------
@staticmethod
def project_polygons(r, **attr):
"""
Export Projects as GeoJSON Polygons to view on the map
- currently assumes that theme_percentages=True
@ToDo: complete
"""
db = current.db
s3db = current.s3db
ptable = s3db.project_project
ttable = s3db.project_theme
tptable = s3db.project_theme_project
pltable = s3db.project_location
ltable = s3db.gis_location
#get_vars = current.request.get_vars
themes = db(ttable.deleted == False).select(ttable.id,
ttable.name,
orderby = ttable.name)
# Total the Budget spent by Theme for each country
countries = {}
query = (ptable.deleted == False) & \
(tptable.project_id == ptable.id) & \
(ptable.id == pltable.project_id) & \
(ltable.id == pltable.location_id)
#if "theme_id" in get_vars:
# query = query & (tptable.id.belongs(get_vars.theme_id))
projects = db(query).select()
for project in projects:
# Only show those projects which are only within 1 country
# @ToDo
_countries = project.location_id
if len(_countries) == 1:
country = _countries[0]
if country in countries:
budget = project.project_project.total_annual_budget()
theme = project.project_theme_project.theme_id
percentage = project.project_theme_project.percentage
countries[country][theme] += budget * percentage
else:
name = db(ltable.id == country).select(ltable.name).first().name
countries[country] = dict(name = name)
# Init all themes to 0
for theme in themes:
countries[country][theme.id] = 0
# Add value for this record
budget = project.project_project.total_annual_budget()
theme = project.project_theme_project.theme_id
percentage = project.project_theme_project.percentage
countries[country][theme] += budget * percentage
query = (ltable.id.belongs(countries))
locations = db(query).select(ltable.id,
ltable.wkt)
for location in locations:
pass
# Convert to GeoJSON
output = json.dumps({})
current.response.headers["Content-Type"] = "application/json"
return output
# -------------------------------------------------------------------------
@staticmethod
def project_timeline(r, **attr):
"""
Display the project on a Simile Timeline
http://www.simile-widgets.org/wiki/Reference_Documentation_for_Timeline
Currently this just displays a Google Calendar
@ToDo: Add Milestones
@ToDo: Filters for different 'layers'
@ToDo: export milestones/tasks as .ics
"""
if r.representation == "html" and r.name == "project":
appname = current.request.application
response = current.response
s3 = response.s3
calendar = r.record.calendar
# Add core Simile Code
s3.scripts.append("/%s/static/scripts/simile/timeline/timeline-api.js" % appname)
# Pass vars to our JS code
s3.js_global.append('''S3.timeline.calendar="%s"''' % calendar)
# Add our control script
if s3.debug:
s3.scripts.append("/%s/static/scripts/S3/s3.timeline.js" % appname)
else:
s3.scripts.append("/%s/static/scripts/S3/s3.timeline.min.js" % appname)
# Create the DIV
item = DIV(_id="s3timeline",
_class="s3-timeline",
)
output = dict(item=item)
output["title"] = current.T("Project Calendar")
# Maintain RHeader for consistency
if "rheader" in attr:
rheader = attr["rheader"](r)
if rheader:
output["rheader"] = rheader
response.view = "timeline.html"
return output
else:
raise HTTP(501, current.ERROR.BAD_METHOD)
# =============================================================================
class S3ProjectActivityModel(S3Model):
"""
Project Activity Model
This model holds the specific Activities for Projects
- currently used in mode_task but not mode_3w
"""
names = ("project_activity",
"project_activity_id",
"project_activity_activity_type",
)
def model(self):
T = current.T
db = current.db
s3 = current.response.s3
add_components = self.add_components
crud_strings = s3.crud_strings
define_table = self.define_table
settings = current.deployment_settings
mode_task = settings.get_project_mode_task()
# ---------------------------------------------------------------------
# Project Activity
#
tablename = "project_activity"
define_table(tablename,
# Instance
self.super_link("doc_id", "doc_entity"),
# Component (each Activity can link to a single Project)
self.project_project_id(),
Field("name",
label = T("Description"),
# Activity can simply be a Distribution
#requires = IS_NOT_EMPTY(),
),
self.project_status_id(),
# An Activity happens at a single Location
self.gis_location_id(readable = not mode_task,
writable = not mode_task,
),
s3_date("date",
label = T("Start Date"),
set_min = "#project_activity_end_date",
),
s3_date("end_date",
label = T("End Date"),
set_max = "#project_activity_date",
start_field = "project_activity_date",
default_interval = 12,
),
# Which contact is this?
# Implementing Org should be a human_resource_id
# Beneficiary could be a person_id
# Either way label should be clear
self.pr_person_id(label = T("Contact Person"),
requires = IS_ADD_PERSON_WIDGET2(allow_empty=True),
widget = S3AddPersonWidget2(controller="pr"),
),
Field("time_estimated", "double",
label = "%s (%s)" % (T("Time Estimate"),
T("hours")),
readable = mode_task,
writable = mode_task,
),
Field("time_actual", "double",
label = "%s (%s)" % (T("Time Taken"),
T("hours")),
readable = mode_task,
# Gets populated from constituent Tasks
writable = False,
),
# @ToDo: Move to compute using stats_year
Field.Method("year", self.project_activity_year),
#Field("year", "list:integer",
# compute = lambda row: \
# self.stats_year(row, "project_activity"),
# label = T("Year"),
# ),
s3_comments(),
*s3_meta_fields())
# CRUD Strings
ACTIVITY_TOOLTIP = T("If you don't see the activity in the list, you can add a new one by clicking link 'Create Activity'.")
ADD_ACTIVITY = T("Create Activity")
crud_strings[tablename] = Storage(
label_create = ADD_ACTIVITY,
title_display = T("Activity Details"),
title_list = T("Activities"),
title_update = T("Edit Activity"),
title_upload = T("Import Activity Data"),
title_report = T("Activity Report"),
label_list_button = T("List Activities"),
msg_record_created = T("Activity Added"),
msg_record_modified = T("Activity Updated"),
msg_record_deleted = T("Activity Deleted"),
msg_list_empty = T("No Activities Found")
)
# Search Method
filter_widgets = [S3OptionsFilter("status_id",
label = T("Status"),
# Doesn't support translation
#represent = "%(name)s",
# @ToDo: Introspect cols
cols = 3,
),
]
# Resource Configuration
use_projects = settings.get_project_projects()
list_fields = ["id",
"name",
"comments",
]
default_row = "project_id"
default_col = "name"
default_fact = "count(id)"
report_fields = [(T("Activity"), "name"),
(T("Year"), "year"),
]
rappend = report_fields.append
fact_fields = [(T("Number of Activities"), "count(id)"),
]
if settings.get_project_activity_types():
list_fields.insert(1, "activity_type.name")
rappend((T("Activity Type"), "activity_type.name"))
default_col = "activity_type.name"
filter_widgets.append(
S3OptionsFilter("activity_activity_type.activity_type_id",
label = T("Type"),
# Doesn't support translation
#represent="%(name)s",
))
if use_projects:
list_fields.insert(0, "project_id")
rappend((T("Project"), "project_id"))
filter_widgets.insert(1,
S3OptionsFilter("project_id",
represent = "%(name)s",
))
if settings.get_project_sectors():
rappend("sector_activity.sector_id")
default_col = "sector_activity.sector_id"
filter_widgets.append(
S3OptionsFilter("sector_activity.sector_id",
# Doesn't support translation
#represent = "%(name)s",
))
if settings.get_project_themes():
rappend("theme_activity.theme_id")
filter_widgets.append(
S3OptionsFilter("theme_activity.theme_id",
# Doesn't support translation
#represent = "%(name)s",
))
# @ToDo: deployment_setting
if settings.has_module("stats"):
rappend("beneficiary.parameter_id")
fact_fields.insert(0,
(T("Number of Beneficiaries"), "sum(beneficiary.value)")
)
default_fact = "sum(beneficiary.value)"
filter_widgets.append(
S3OptionsFilter("beneficiary.parameter_id",
# Doesn't support translation
#represent = "%(name)s",
))
if settings.get_project_activity_filter_year():
filter_widgets.append(
S3OptionsFilter("year",
label = T("Year"),
#operator = "anyof",
#options = lambda: \
# self.stats_year_options("project_activity"),
options = project_activity_year_options,
),
)
if use_projects and settings.get_project_mode_drr():
rappend(("project_id$hazard_project.hazard_id"))
rappend((T("HFA"), "project_id$drr.hfa"))
if mode_task:
list_fields.insert(3, "time_estimated")
list_fields.insert(4, "time_actual")
rappend((T("Time Estimated"), "time_estimated"))
rappend((T("Time Actual"), "time_actual"))
default_fact = "sum(time_actual)"
#create_next = URL(c="project", f="activity",
# args=["[id]", "task"])
else:
#create_next = URL(c="project", f="activity", args=["[id]"])
# Which levels of Hierarchy are we using?
levels = current.gis.get_relevant_hierarchy_levels()
filter_widgets.insert(0,
S3LocationFilter("location_id",
levels = levels,
))
posn = 2
for level in levels:
lfield = "location_id$%s" % level
list_fields.insert(posn, lfield)
report_fields.append(lfield)
posn += 1
# Highest-level of Hierarchy
default_row = "location_id$%s" % levels[0]
report_options = Storage(rows = report_fields,
cols = report_fields,
fact = fact_fields,
defaults = Storage(rows = default_row,
cols = default_col,
fact = default_fact,
totals = True,
)
)
self.configure(tablename,
# Leave these workflows for Templates
#create_next = create_next,
deduplicate = self.project_activity_deduplicate,
filter_widgets = filter_widgets,
list_fields = list_fields,
#onaccept = self.project_activity_onaccept,
report_options = report_options,
super_entity = "doc_entity",
)
# Reusable Field
represent = project_ActivityRepresent()
activity_id = S3ReusableField("activity_id", "reference %s" % tablename,
comment = S3AddResourceLink(ADD_ACTIVITY,
c="project", f="activity",
tooltip=ACTIVITY_TOOLTIP),
label = T("Activity"),
ondelete = "CASCADE",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "project_activity.id",
represent,
sort=True)),
sortby="name",
)
# Also use this Represent for Report drilldowns
# @todo: make lazy_table
table = db[tablename]
table.id.represent = represent
# Components
add_components(tablename,
# Activity Types
project_activity_type = {"link": "project_activity_activity_type",
"joinby": "activity_id",
"key": "activity_type_id",
"actuate": "replace",
"autocomplete": "name",
"autodelete": False,
},
# Format for InlineComponent/filter_widget
project_activity_activity_type = "activity_id",
# Beneficiaries
project_beneficiary = {"link": "project_beneficiary_activity",
"joinby": "activity_id",
"key": "beneficiary_id",
"actuate": "hide",
},
# Format for InlineComponent/filter_widget
project_beneficiary_activity = "activity_id",
# Distributions
supply_distribution = "activity_id",
# Events
event_event = {"link": "event_activity",
"joinby": "activity_id",
"key": "event_id",
"actuate": "hide",
},
# Organisations
org_organisation = {"link": "project_activity_organisation",
"joinby": "activity_id",
"key": "organisation_id",
"actuate": "hide",
},
# Format for InlineComponent/filter_widget
project_activity_organisation = "activity_id",
# Organisation Groups (Coalitions/Networks)
org_group = {"link": "project_activity_group",
"joinby": "activity_id",
"key": "group_id",
"actuate": "hide",
},
# Format for InlineComponent/filter_widget
project_activity_group = "activity_id",
# Sectors
org_sector = {"link": "project_sector_activity",
"joinby": "activity_id",
"key": "sector_id",
"actuate": "hide",
},
# Format for InlineComponent/filter_widget
project_sector_activity = "activity_id",
# Tasks
project_task = {"link": "project_task_activity",
"joinby": "activity_id",
"key": "task_id",
"actuate": "replace",
"autocomplete": "name",
"autodelete": False,
},
# Themes
project_theme = {"link": "project_theme_activity",
"joinby": "activity_id",
"key": "theme_id",
"actuate": "hide",
},
# Format for InlineComponent/filter_widget
project_theme_activity = "activity_id",
)
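        # Note (added for clarity): each dict above declares a many-to-many
        # component resolved through a link table - e.g. project_theme is
        # reached from project_activity via project_theme_activity, where
        # "joinby" names the link table's FK back to this table and "key"
        # names its FK to the other side. The plain string entries simply
        # register the link tables themselves as direct components, which is
        # the format needed by InlineComponents and filter widgets.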
# ---------------------------------------------------------------------
# Activity Type - Activity Link Table
#
tablename = "project_activity_activity_type"
define_table(tablename,
activity_id(empty = False,
ondelete = "CASCADE",
),
self.project_activity_type_id(empty = False,
ondelete = "CASCADE",
),
*s3_meta_fields())
crud_strings[tablename] = Storage(
label_create = T("Add Activity Type"),
title_display = T("Activity Type"),
title_list = T("Activity Types"),
title_update = T("Edit Activity Type"),
title_upload = T("Import Activity Type data"),
label_list_button = T("List Activity Types"),
msg_record_created = T("Activity Type added to Activity"),
msg_record_modified = T("Activity Type Updated"),
msg_record_deleted = T("Activity Type removed from Activity"),
msg_list_empty = T("No Activity Types found for this Activity")
)
# Pass names back to global scope (s3.*)
return dict(project_activity_id = activity_id,
)
# -------------------------------------------------------------------------
@staticmethod
def defaults():
""" Safe defaults for model-global names if module is disabled """
dummy = S3ReusableField("dummy_id", "integer",
readable = False,
writable = False)
return dict(project_activity_id = lambda **attr: dummy("activity_id"),
)
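        # Note on the pattern above (added for clarity): when the project
        # module is disabled, project_activity_id resolves to a lambda that
        # returns a hidden dummy integer field, so other models which call
        # self.project_activity_id(...) in their define_table keep working
        # without the project_activity table being defined.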
# -------------------------------------------------------------------------
@staticmethod
def project_activity_deduplicate(item):
""" Import item de-duplication """
data = item.data
project_id = data.get("project_id")
name = data.get("name")
# Match activity by project_id and name
if project_id and name:
table = item.table
query = (table.project_id == project_id) & \
(table.name == name)
duplicate = current.db(query).select(table.id,
limitby=(0, 1)).first()
if duplicate:
item.id = duplicate.id
item.method = item.METHOD.UPDATE
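        # Calling convention sketch (assumption for illustration - the actual
        # caller is the S3 import framework, not this module): the importer
        # passes each import item to this hook; if the hook sets item.id and
        # item.method = item.METHOD.UPDATE, the matching record is updated in
        # place instead of a new record being inserted.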
# ---------------------------------------------------------------------
@staticmethod
def project_activity_year(row):
"""
Virtual field for the project_activity table
@ToDo: Deprecate: replace with computed field
"""
if hasattr(row, "project_activity"):
row = row.project_activity
try:
activity_id = row.id
except AttributeError:
return []
if hasattr(row, "date"):
start_date = row.date
else:
start_date = False
if hasattr(row, "end_date"):
end_date = row.end_date
else:
end_date = False
if start_date is False or end_date is False:
s3db = current.s3db
table = s3db.project_activity
activity = current.db(table.id == activity_id).select(table.date,
table.end_date,
cache=s3db.cache,
limitby=(0, 1)
).first()
if activity:
start_date = activity.date
end_date = activity.end_date
if not start_date and not end_date:
return []
elif not end_date:
return [start_date.year]
elif not start_date:
return [end_date.year]
else:
return list(xrange(start_date.year, end_date.year + 1))
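        # Worked example (illustrative values only): an activity with
        # date = 2012-03-01 and end_date = 2014-07-31 yields [2012, 2013, 2014];
        # with only one of the two dates set, a single-element list is
        # returned, and with neither date set the result is [].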
# =============================================================================
class S3ProjectActivityTypeModel(S3Model):
"""
Project Activity Type Model
This model holds the Activity Types for Projects
        - useful where we don't have details of the actual Activities,
          only this summary of Types
"""
names = ("project_activity_type",
"project_activity_type_location",
"project_activity_type_project",
"project_activity_type_sector",
"project_activity_type_id",
)
def model(self):
T = current.T
db = current.db
crud_strings = current.response.s3.crud_strings
define_table = self.define_table
# ---------------------------------------------------------------------
# Activity Types
#
tablename = "project_activity_type"
define_table(tablename,
Field("name", length=128, notnull=True, unique=True,
label = T("Name"),
represent = lambda v: T(v) if v is not None \
else NONE,
),
s3_comments(),
*s3_meta_fields())
# CRUD Strings
ADD_ACTIVITY_TYPE = T("Create Activity Type")
crud_strings[tablename] = Storage(
label_create = ADD_ACTIVITY_TYPE,
title_display = T("Activity Type"),
title_list = T("Activity Types"),
title_update = T("Edit Activity Type"),
label_list_button = T("List Activity Types"),
msg_record_created = T("Activity Type Added"),
msg_record_modified = T("Activity Type Updated"),
msg_record_deleted = T("Activity Type Deleted"),
msg_list_empty = T("No Activity Types Found")
)
# Reusable Fields
represent = S3Represent(lookup=tablename, translate=True)
activity_type_id = S3ReusableField("activity_type_id", "reference %s" % tablename,
label = T("Activity Type"),
ondelete = "SET NULL",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "project_activity_type.id",
represent,
sort=True)),
sortby = "name",
comment = S3AddResourceLink(title=ADD_ACTIVITY_TYPE,
c="project",
f="activity_type",
tooltip=T("If you don't see the type in the list, you can add a new one by clicking link 'Create Activity Type'.")),
)
if current.deployment_settings.get_project_sectors():
# Component (for Custom Form)
self.add_components(tablename,
project_activity_type_sector = "activity_type_id",
)
crud_form = S3SQLCustomForm(
"name",
# Sectors
S3SQLInlineComponent(
"activity_type_sector",
label=T("Sectors to which this Activity Type can apply"),
fields=["sector_id"],
),
"comments",
)
self.configure(tablename,
crud_form = crud_form,
list_fields = ["id",
"name",
(T("Sectors"), "activity_type_sector.sector_id"),
"comments",
],
)
# ---------------------------------------------------------------------
# Activity Type - Sector Link Table
#
tablename = "project_activity_type_sector"
define_table(tablename,
activity_type_id(empty = False,
ondelete = "CASCADE",
),
self.org_sector_id(label = "",
empty = False,
ondelete = "CASCADE",
),
*s3_meta_fields())
# ---------------------------------------------------------------------
# Activity Type - Project Location Link Table
#
tablename = "project_activity_type_location"
define_table(tablename,
activity_type_id(empty = False,
ondelete = "CASCADE",
),
self.project_location_id(empty = False,
ondelete = "CASCADE",
),
*s3_meta_fields())
# ---------------------------------------------------------------------
# Activity Type - Project Link Table
#
tablename = "project_activity_type_project"
define_table(tablename,
activity_type_id(empty = False,
ondelete = "CASCADE",
),
self.project_project_id(empty = False,
ondelete = "CASCADE",
),
*s3_meta_fields())
crud_strings[tablename] = Storage(
label_create = T("Add Activity Type"),
title_display = T("Activity Type"),
title_list = T("Activity Types"),
title_update = T("Edit Activity Type"),
title_upload = T("Import Activity Type data"),
label_list_button = T("List Activity Types"),
msg_record_created = T("Activity Type added to Project Location"),
msg_record_modified = T("Activity Type Updated"),
msg_record_deleted = T("Activity Type removed from Project Location"),
msg_list_empty = T("No Activity Types found for this Project Location")
)
# Pass names back to global scope (s3.*)
return dict(project_activity_type_id = activity_type_id,
)
# =============================================================================
class S3ProjectActivityOrganisationModel(S3Model):
"""
Project Activity Organisation Model
This model allows Activities to link to Organisations
&/or Organisation Groups
- useful when we don't have the details of the Projects
"""
names = ("project_activity_organisation",
"project_activity_group",
)
def model(self):
T = current.T
configure = self.configure
define_table = self.define_table
project_activity_id = self.project_activity_id
# ---------------------------------------------------------------------
# Activities <> Organisations - Link table
#
tablename = "project_activity_organisation"
define_table(tablename,
project_activity_id(empty = False,
ondelete = "CASCADE",
),
self.org_organisation_id(empty = False,
ondelete = "CASCADE",
),
*s3_meta_fields())
# CRUD Strings
current.response.s3.crud_strings[tablename] = Storage(
label_create = T("Add Organization to Activity"),
title_display = T("Activity Organization"),
title_list = T("Activity Organizations"),
title_update = T("Edit Activity Organization"),
label_list_button = T("List Activity Organizations"),
msg_record_created = T("Activity Organization Added"),
msg_record_modified = T("Activity Organization Updated"),
msg_record_deleted = T("Activity Organization Deleted"),
msg_list_empty = T("No Activity Organizations Found")
)
configure(tablename,
deduplicate = self.project_activity_organisation_deduplicate,
)
# ---------------------------------------------------------------------
# Activities <> Organisation Groups - Link table
#
tablename = "project_activity_group"
define_table(tablename,
project_activity_id(empty = False,
ondelete = "CASCADE",
),
self.org_group_id(empty = False,
ondelete = "CASCADE",
),
*s3_meta_fields())
configure(tablename,
deduplicate = self.project_activity_group_deduplicate,
)
# Pass names back to global scope (s3.*)
return {}
# -------------------------------------------------------------------------
@staticmethod
def project_activity_organisation_deduplicate(item):
""" Import item de-duplication """
data = item.data
activity_id = data.get("activity_id")
organisation_id = data.get("organisation_id")
if activity_id and organisation_id:
table = item.table
query = (table.activity_id == activity_id) & \
(table.organisation_id == organisation_id)
duplicate = current.db(query).select(table.id,
limitby=(0, 1)).first()
if duplicate:
item.id = duplicate.id
item.method = item.METHOD.UPDATE
# -------------------------------------------------------------------------
@staticmethod
def project_activity_group_deduplicate(item):
""" Import item de-duplication """
data = item.data
activity_id = data.get("activity_id")
group_id = data.get("group_id")
if activity_id and group_id:
table = item.table
query = (table.activity_id == activity_id) & \
(table.group_id == group_id)
duplicate = current.db(query).select(table.id,
limitby=(0, 1)).first()
if duplicate:
item.id = duplicate.id
item.method = item.METHOD.UPDATE
# =============================================================================
class S3ProjectActivitySectorModel(S3Model):
"""
Project Activity Sector Model
An Activity can be classified to 1 or more Sectors
"""
names = ("project_sector_activity",)
def model(self):
# ---------------------------------------------------------------------
# Project Activities <> Sectors Link Table
#
# @ToDo" When Activity is linked to a Project, ensure these stay in sync
#
tablename = "project_sector_activity"
self.define_table(tablename,
self.org_sector_id(empty = False,
ondelete = "CASCADE",
),
self.project_activity_id(empty = False,
ondelete = "CASCADE",
),
*s3_meta_fields())
self.configure(tablename,
deduplicate = self.project_sector_activity_deduplicate,
)
# Pass names back to global scope (s3.*)
return {}
# -------------------------------------------------------------------------
@staticmethod
def project_sector_activity_deduplicate(item):
""" Import item de-duplication """
data = item.data
activity_id = data.get("activity_id")
sector_id = data.get("sector_id")
if activity_id and sector_id:
table = item.table
query = (table.activity_id == activity_id) & \
(table.sector_id == sector_id)
duplicate = current.db(query).select(table.id,
limitby=(0, 1)).first()
if duplicate:
item.id = duplicate.id
item.method = item.METHOD.UPDATE
# =============================================================================
class S3ProjectAnnualBudgetModel(S3Model):
"""
Project Budget Model
This model holds the annual budget entries for projects
@ToDo: Replace with Budget module
"""
names = ("project_annual_budget",)
def model(self):
T = current.T
db = current.db
# ---------------------------------------------------------------------
# Annual Budgets
#
tablename = "project_annual_budget"
self.define_table(tablename,
self.project_project_id(
# Override requires so that update access to the projects isn't required
requires = IS_ONE_OF(db, "project_project.id",
self.project_project_represent
)
),
Field("year", "integer", notnull=True,
default = None, # make it current year
label = T("Year"),
requires = IS_INT_IN_RANGE(1950, 3000),
),
Field("amount", "double", notnull=True,
default = 0.00,
label = T("Amount"),
#label = T("Amount Budgeted"),
requires = IS_FLOAT_AMOUNT(),
),
#Field("amount_spent", "double", notnull=True,
# default = 0.00,
# label = T("Amount Spent"),
# requires = IS_FLOAT_AMOUNT(),
# # Enable in templates as-required
# readable = False,
# writable = False,
# ),
s3_currency(required=True),
*s3_meta_fields())
# CRUD Strings
current.response.s3.crud_strings[tablename] = Storage(
label_create = T("Add Annual Budget"),
title_display = T("Annual Budget"),
title_list = T("Annual Budgets"),
title_update = T("Edit Annual Budget"),
title_upload = T("Import Annual Budget data"),
title_report = T("Report on Annual Budgets"),
label_list_button = T("List Annual Budgets"),
msg_record_created = T("New Annual Budget created"),
msg_record_modified = T("Annual Budget updated"),
msg_record_deleted = T("Annual Budget deleted"),
msg_list_empty = T("No annual budgets found")
)
self.configure(tablename,
list_fields = ["year",
"amount",
"currency",
],
)
# Pass names back to global scope (s3.*)
return {}
# =============================================================================
class S3ProjectBeneficiaryModel(S3Model):
"""
Project Beneficiary Model
- depends on Stats module
"""
names = ("project_beneficiary_type",
"project_beneficiary",
"project_beneficiary_activity",
"project_beneficiary_activity_type",
)
def model(self):
if not current.deployment_settings.has_module("stats"):
current.log.warning("Project Beneficiary Model needs Stats module enabling")
return {}
T = current.T
db = current.db
s3 = current.response.s3
settings = current.deployment_settings
configure = self.configure
crud_strings = s3.crud_strings
define_table = self.define_table
super_link = self.super_link
# ---------------------------------------------------------------------
# Project Beneficiary Type
#
tablename = "project_beneficiary_type"
define_table(tablename,
super_link("parameter_id", "stats_parameter"),
Field("name", length=128, unique=True,
label = T("Name"),
represent = lambda v: T(v) if v is not None \
else NONE,
requires = IS_NOT_IN_DB(db,
"project_beneficiary_type.name"),
),
s3_comments("description",
label = T("Description"),
),
*s3_meta_fields())
# CRUD Strings
ADD_BNF_TYPE = T("Create Beneficiary Type")
crud_strings[tablename] = Storage(
label_create = ADD_BNF_TYPE,
title_display = T("Beneficiary Type"),
title_list = T("Beneficiary Types"),
title_update = T("Edit Beneficiary Type"),
label_list_button = T("List Beneficiary Types"),
msg_record_created = T("Beneficiary Type Added"),
msg_record_modified = T("Beneficiary Type Updated"),
msg_record_deleted = T("Beneficiary Type Deleted"),
msg_list_empty = T("No Beneficiary Types Found")
)
# Resource Configuration
configure(tablename,
super_entity = "stats_parameter",
)
# ---------------------------------------------------------------------
# Project Beneficiary
#
# @ToDo: Split project_id & project_location_id to separate Link Tables
#
tablename = "project_beneficiary"
define_table(tablename,
# Instance
super_link("data_id", "stats_data"),
# Link Fields
# populated automatically
self.project_project_id(readable = False,
writable = False,
),
self.project_location_id(comment = None),
# This is a component, so needs to be a super_link
# - can't override field name, ondelete or requires
super_link("parameter_id", "stats_parameter",
empty = False,
instance_types = ("project_beneficiary_type",),
label = T("Beneficiary Type"),
represent = S3Represent(lookup="stats_parameter",
translate=True,
),
readable = True,
writable = True,
comment = S3AddResourceLink(c="project",
f="beneficiary_type",
vars = dict(child="parameter_id"),
title=ADD_BNF_TYPE,
tooltip=T("Please record Beneficiary according to the reporting needs of your project")
),
),
# Populated automatically from project_location
self.gis_location_id(readable = False,
writable = False,
),
Field("value", "integer",
label = T("Number"),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Actual Number of Beneficiaries"),
T("The number of beneficiaries actually reached by this activity"))
),
represent = lambda v: IS_INT_AMOUNT.represent(v),
requires = IS_INT_IN_RANGE(0, 99999999),
),
Field("target_value", "integer",
label = T("Targeted Number"),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Targeted Number of Beneficiaries"),
T("The number of beneficiaries targeted by this activity"))
),
represent = lambda v: IS_INT_AMOUNT.represent(v),
requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 99999999)),
),
s3_date("date",
#empty = False,
label = T("Start Date"),
set_min = "#project_beneficiary_end_date",
),
s3_date("end_date",
#empty = False,
label = T("End Date"),
set_max = "#project_beneficiary_date",
start_field = "project_beneficiary_date",
default_interval = 12,
),
Field("year", "list:integer",
compute = lambda row: \
self.stats_year(row, "project_beneficiary"),
label = T("Year"),
),
#self.stats_source_id(),
s3_comments(),
*s3_meta_fields())
# CRUD Strings
ADD_BNF = T("Add Beneficiaries")
crud_strings[tablename] = Storage(
label_create = ADD_BNF,
title_display = T("Beneficiaries Details"),
title_list = T("Beneficiaries"),
title_update = T("Edit Beneficiaries"),
title_report = T("Beneficiary Report"),
label_list_button = T("List Beneficiaries"),
msg_record_created = T("Beneficiaries Added"),
msg_record_modified = T("Beneficiaries Updated"),
msg_record_deleted = T("Beneficiaries Deleted"),
msg_list_empty = T("No Beneficiaries Found")
)
# Model options
programmes = settings.get_project_programmes()
sectors = settings.get_project_sectors()
hazards = settings.get_project_hazards()
themes = settings.get_project_themes()
programme_id = "project_id$programme_project.programme_id"
sector_id = "project_id$sector_project.sector_id"
hazard_id = "project_id$hazard_project.hazard_id"
theme_id = "project_id$theme_project.theme_id"
# Which levels of location hierarchy are we using?
levels = current.gis.get_relevant_hierarchy_levels()
# Filter Widgets
filter_widgets = [
#S3TextFilter(["project_id$name",
# "project_id$code",
# "project_id$description",
# "project_id$organisation.name",
# "project_id$organisation.acronym",
# ],
# label = T("Search"),
# _class = "filter-search",
# ),
#S3OptionsFilter("project_id",
# hidden = True,
# ),
S3OptionsFilter("parameter_id",
label = T("Beneficiary Type"),
#hidden = True,
),
S3OptionsFilter("year",
operator = "anyof",
options = lambda: \
self.stats_year_options("project_beneficiary"),
hidden = True,
),
S3LocationFilter("location_id",
levels = levels,
#hidden = True,
),
]
if programmes:
filter_widgets.insert(0, S3OptionsFilter(programme_id))
if sectors:
filter_widgets.insert(0, S3OptionsFilter(sector_id))
if themes:
filter_widgets.append(S3OptionsFilter(theme_id))
# List fields
list_fields = ["project_id",
(T("Beneficiary Type"), "parameter_id"),
"value",
"target_value",
"year",
]
        if programmes:
            list_fields.append(programme_id)
# Report axes
report_fields = [(T("Beneficiary Type"), "parameter_id"),
"project_id",
#"project_location_id",
"year",
]
add_report_field = report_fields.append
if programmes:
add_report_field(programme_id)
if sectors:
add_report_field(sector_id)
if hazards:
add_report_field(hazard_id)
if themes:
add_report_field(theme_id)
# Location levels (append to list fields and report axes)
for level in levels:
lfield = "location_id$%s" % level
list_fields.append(lfield)
add_report_field(lfield)
if "L0" in levels:
default_row = "location_id$L0"
elif "L1" in levels:
default_row = "location_id$L1"
else:
default_row = "project_id"
# Report options and defaults
report_options = Storage(rows = report_fields,
cols = report_fields,
fact = [(T("Number of Beneficiaries"),
"sum(value)",
),
(T("Number of Beneficiaries Targeted"),
"sum(target_value)",
),
],
defaults = Storage(rows=default_row,
cols="parameter_id",
fact="sum(value)",
totals=True
),
)
# Resource configuration
configure(tablename,
context = {"project": "project_id",
},
deduplicate = self.project_beneficiary_deduplicate,
filter_widgets = filter_widgets,
list_fields = list_fields,
onaccept = self.project_beneficiary_onaccept,
report_options = report_options,
super_entity = "stats_data",
)
# Reusable Field
beneficiary_id = S3ReusableField("beneficiary_id", "reference %s" % tablename,
label = T("Beneficiaries"),
ondelete = "SET NULL",
represent = self.project_beneficiary_represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "project_beneficiary.id",
self.project_beneficiary_represent,
sort=True)),
sortby = "name",
comment = S3AddResourceLink(c="project", f="beneficiary",
title=ADD_BNF,
tooltip=\
T("If you don't see the beneficiary in the list, you can add a new one by clicking link 'Add Beneficiaries'.")),
)
# Components
self.add_components(tablename,
# Activity Types
project_activity_type = {"link": "project_beneficiary_activity_type",
"joinby": "beneficiary_id",
"key": "activity_type_id",
"actuate": "hide",
},
# Format for OptionsFilter
project_beneficiary_activity_type = "beneficiary_id",
)
# ---------------------------------------------------------------------
# Beneficiary <> Activity Link Table
#
tablename = "project_beneficiary_activity"
define_table(tablename,
self.project_activity_id(empty = False,
ondelete = "CASCADE",
),
beneficiary_id(empty = False,
ondelete = "CASCADE",
),
#s3_comments(),
*s3_meta_fields())
configure(tablename,
deduplicate = self.project_beneficiary_activity_deduplicate,
)
# ---------------------------------------------------------------------
# Beneficiary <> Activity Type Link Table
#
tablename = "project_beneficiary_activity_type"
define_table(tablename,
self.project_activity_type_id(empty = False,
ondelete = "CASCADE",
),
beneficiary_id(empty = False,
ondelete = "CASCADE",
),
#s3_comments(),
*s3_meta_fields())
configure(tablename,
deduplicate = self.project_beneficiary_activity_type_deduplicate,
)
# Pass names back to global scope (s3.*)
return {}
# -------------------------------------------------------------------------
@staticmethod
def project_beneficiary_represent(id, row=None):
"""
FK representation
@ToDo: Bulk
"""
if row:
return row.type
if not id:
return current.messages["NONE"]
db = current.db
table = db.project_beneficiary
ttable = db.project_beneficiary_type
query = (table.id == id) & \
(table.parameter_id == ttable.id)
r = db(query).select(table.value,
ttable.name,
limitby = (0, 1)).first()
try:
return "%s %s" % (r["project_beneficiary.value"],
r["project_beneficiary_type.name"])
except:
return current.messages.UNKNOWN_OPT
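        # Example of the output format (hypothetical values): a record with
        # value=120 whose beneficiary type is named "Households" would be
        # represented as "120 Households"; ids that cannot be resolved fall
        # back to UNKNOWN_OPT.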
# ---------------------------------------------------------------------
@staticmethod
def project_beneficiary_onaccept(form):
"""
Update project_beneficiary project & location from project_location_id
"""
db = current.db
btable = db.project_beneficiary
ltable = db.project_location
record_id = form.vars.id
query = (btable.id == record_id) & \
(ltable.id == btable.project_location_id)
project_location = db(query).select(ltable.project_id,
ltable.location_id,
limitby=(0, 1)).first()
if project_location:
db(btable.id == record_id).update(
project_id = project_location.project_id,
location_id = project_location.location_id
)
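        # In other words (added for clarity): project_id and location_id on
        # project_beneficiary are denormalised copies taken from the linked
        # project_location record, refreshed on every accept so that filters
        # and reports can use them without joining through project_location.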
# ---------------------------------------------------------------------
@staticmethod
def project_beneficiary_deduplicate(item):
""" Import item de-duplication """
data = item.data
parameter_id = data.get("parameter_id")
project_location_id = data.get("project_location_id")
# Match beneficiary by type and project_location
if parameter_id and project_location_id:
table = item.table
query = (table.parameter_id == parameter_id) & \
(table.project_location_id == project_location_id)
duplicate = current.db(query).select(table.id,
limitby=(0, 1)).first()
if duplicate:
item.id = duplicate.id
item.method = item.METHOD.UPDATE
# ---------------------------------------------------------------------
@staticmethod
def project_beneficiary_activity_deduplicate(item):
""" Import item de-duplication """
data = item.data
activity_id = data.get("activity_id")
beneficiary_id = data.get("beneficiary_id")
if beneficiary_id and activity_id:
table = item.table
query = (table.beneficiary_id == beneficiary_id) & \
(table.activity_id == activity_id)
duplicate = current.db(query).select(table.id,
limitby=(0, 1)).first()
if duplicate:
item.id = duplicate.id
item.method = item.METHOD.UPDATE
# ---------------------------------------------------------------------
@staticmethod
def project_beneficiary_activity_type_deduplicate(item):
""" Import item de-duplication """
data = item.data
activity_type_id = data.get("activity_type_id")
beneficiary_id = data.get("beneficiary_id")
if beneficiary_id and activity_type_id:
table = item.table
query = (table.beneficiary_id == beneficiary_id) & \
(table.activity_type_id == activity_type_id)
duplicate = current.db(query).select(table.id,
limitby=(0, 1)).first()
if duplicate:
item.id = duplicate.id
item.method = item.METHOD.UPDATE
# =============================================================================
class S3ProjectCampaignModel(S3Model):
"""
Project Campaign Model
- used for TERA integration:
http://www.ifrc.org/en/what-we-do/beneficiary-communications/tera/
- depends on Stats module
"""
names = ("project_campaign",
"project_campaign_message",
"project_campaign_keyword",
#"project_campaign_response",
"project_campaign_response_summary",
)
def model(self):
if not current.deployment_settings.has_module("stats"):
# Campaigns Model needs Stats module enabling
return {}
T = current.T
db = current.db
add_components = self.add_components
configure = self.configure
crud_strings = current.response.s3.crud_strings
define_table = self.define_table
super_link = self.super_link
location_id = self.gis_location_id
# ---------------------------------------------------------------------
# Project Campaign
#
tablename = "project_campaign"
define_table(tablename,
#self.project_project_id(),
Field("name", length=128, #unique=True,
label = T("Name"),
#requires = IS_NOT_IN_DB(db,
# "project_campaign.name")
),
s3_comments("description",
label = T("Description"),
),
*s3_meta_fields())
# CRUD Strings
ADD_CAMPAIGN = T("Create Campaign")
crud_strings[tablename] = Storage(
label_create = ADD_CAMPAIGN,
title_display = T("Campaign"),
title_list = T("Campaigns"),
title_update = T("Edit Campaign"),
label_list_button = T("List Campaigns"),
msg_record_created = T("Campaign Added"),
msg_record_modified = T("Campaign Updated"),
msg_record_deleted = T("Campaign Deleted"),
msg_list_empty = T("No Campaigns Found")
)
# Reusable Field
represent = S3Represent(lookup=tablename)
campaign_id = S3ReusableField("campaign_id", "reference %s" % tablename,
sortby="name",
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "project_campaign.id",
represent,
sort=True)),
represent = represent,
label = T("Campaign"),
comment = S3AddResourceLink(c="project",
f="campaign",
title=ADD_CAMPAIGN,
tooltip=\
T("If you don't see the campaign in the list, you can add a new one by clicking link 'Add Campaign'.")),
ondelete = "CASCADE")
add_components(tablename,
project_campaign_message = "campaign_id",
)
# ---------------------------------------------------------------------
# Project Campaign Message
# - a Message to broadcast to a geographic location (Polygon)
#
tablename = "project_campaign_message"
define_table(tablename,
campaign_id(),
Field("name", length=128, #unique=True,
#requires = IS_NOT_IN_DB(db,
# "project_campaign.name")
),
s3_comments("message",
label = T("Message")),
location_id(
widget = S3LocationSelector(catalog_layers = True,
points = False,
polygons = True,
)
),
# @ToDo: Allow selection of which channel message should be sent out on
#self.msg_channel_id(),
# @ToDo: Record the Message sent out
#self.msg_message_id(),
s3_comments(),
*s3_meta_fields())
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Add Campaign Message"),
title_display = T("Campaign Message"),
title_list = T("Campaign Messages"),
title_update = T("Edit Campaign Message"),
label_list_button = T("List Campaign Messages"),
msg_record_created = T("Campaign Message Added"),
msg_record_modified = T("Campaign Message Updated"),
msg_record_deleted = T("Campaign Message Deleted"),
msg_list_empty = T("No Campaign Messages Found")
)
# Reusable Field
represent = S3Represent(lookup=tablename)
message_id = S3ReusableField("campaign_message_id", "reference %s" % tablename,
sortby="name",
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "project_campaign_message.id",
represent,
sort=True)),
represent = represent,
label = T("Campaign Message"),
ondelete = "CASCADE")
# Components
add_components(tablename,
# Responses
#project_campaign_response = "campaign_message_id",
# Summary
project_campaign_response_summary = "campaign_message_id",
)
# ---------------------------------------------------------------------
# Project Campaign Keyword
# - keywords in responses which are used in Stats reporting
#
tablename = "project_campaign_keyword"
define_table(tablename,
super_link("parameter_id", "stats_parameter"),
Field("name", length=128, unique=True,
label = T("Name"),
requires = IS_NOT_IN_DB(db,
"project_campaign_keyword.name"),
),
s3_comments("description",
label = T("Description"),
),
*s3_meta_fields())
# CRUD Strings
ADD_CAMPAIGN_KW = T("Add Keyword")
crud_strings[tablename] = Storage(
label_create = ADD_CAMPAIGN_KW,
title_display = T("Keyword"),
title_list = T("Keywords"),
title_update = T("Edit Keyword"),
label_list_button = T("List Keywords"),
msg_record_created = T("Keyword Added"),
msg_record_modified = T("Keyword Updated"),
msg_record_deleted = T("Keyword Deleted"),
msg_list_empty = T("No Keywords Found")
)
# Resource Configuration
configure(tablename,
super_entity = "stats_parameter",
)
# ---------------------------------------------------------------------
# Project Campaign Response
# - individual response (unused for TERA)
# - this can be populated by parsing raw responses
# - these are aggregated into project_campaign_response_summary
#
#tablename = "project_campaign_response"
#define_table(tablename,
# message_id(),
# This is a component, so needs to be a super_link
# - can't override field name, ondelete or requires
# super_link("parameter_id", "stats_parameter",
# label = T("Keyword"),
# instance_types = ("project_campaign_keyword",),
# represent = S3Represent(lookup="stats_parameter"),
# readable = True,
# writable = True,
# empty = False,
# ),
# Getting this without TERA may be hard!
#location_id(writable = False),
# @ToDo: Link to the raw Message received
#self.msg_message_id(),
# s3_datetime(),
# s3_comments(),
# *s3_meta_fields())
# CRUD Strings
#ADD_CAMPAIGN_RESP = T("Add Response")
#crud_strings[tablename] = Storage(
# label_create = ADD_CAMPAIGN_RESP,
# title_display = T("Response Details"),
# title_list = T("Responses"),
# title_update = T("Edit Response"),
# title_report = T("Response Report"),
# label_list_button = T("List Responses"),
# msg_record_created = T("Response Added"),
# msg_record_modified = T("Response Updated"),
# msg_record_deleted = T("Response Deleted"),
# msg_list_empty = T("No Responses Found")
#)
# ---------------------------------------------------------------------
# Project Campaign Response Summary
# - aggregated responses (by Keyword/Location)
# - TERA data comes in here
#
tablename = "project_campaign_response_summary"
define_table(tablename,
message_id(),
# Instance
super_link("data_id", "stats_data"),
# This is a component, so needs to be a super_link
# - can't override field name, ondelete or requires
super_link("parameter_id", "stats_parameter",
label = T("Keyword"),
instance_types = ("project_campaign_keyword",),
represent = S3Represent(lookup="stats_parameter"),
readable = True,
writable = True,
empty = False,
),
# Populated automatically (by TERA)
# & will be a msg_basestation?
location_id(writable = False),
Field("value", "integer",
label = T("Number of Responses"),
represent = lambda v: \
IS_INT_AMOUNT.represent(v),
requires = IS_INT_IN_RANGE(0, 99999999),
),
# @ToDo: Populate automatically from time Message is sent?
s3_date("date",
label = T("Start Date"),
#empty = False,
),
s3_date("end_date",
label = T("End Date"),
start_field = "project_campaign_response_summary_date",
default_interval = 1,
#empty = False,
),
s3_comments(),
*s3_meta_fields())
# CRUD Strings
ADD_CAMPAIGN_RESP_SUMM = T("Add Response Summary")
crud_strings[tablename] = Storage(
label_create = ADD_CAMPAIGN_RESP_SUMM,
title_display = T("Response Summary Details"),
title_list = T("Response Summaries"),
title_update = T("Edit Response Summary"),
title_report = T("Response Summary Report"),
label_list_button = T("List Response Summaries"),
msg_record_created = T("Response Summary Added"),
msg_record_modified = T("Response Summary Updated"),
msg_record_deleted = T("Response Summary Deleted"),
msg_list_empty = T("No Response Summaries Found")
)
# Pass names back to global scope (s3.*)
return {}
# =============================================================================
class S3ProjectFrameworkModel(S3Model):
"""
Project Framework Model
"""
names = ("project_framework",
"project_framework_organisation",
)
def model(self):
T = current.T
db = current.db
crud_strings = current.response.s3.crud_strings
define_table = self.define_table
messages = current.messages
ORGANISATION = messages.ORGANISATION
ORGANISATIONS = T("Organization(s)")
# ---------------------------------------------------------------------
# Project Frameworks
#
tablename = "project_framework"
define_table(tablename,
self.super_link("doc_id", "doc_entity"),
Field("name", length=255, unique=True,
label = T("Name"),
),
s3_comments("description",
label = T("Description"),
comment = None,
),
Field("time_frame",
label = T("Time Frame"),
represent = lambda v: v or messages.NONE,
),
*s3_meta_fields())
# CRUD Strings
if current.deployment_settings.get_auth_record_approval():
msg_record_created = T("Policy or Strategy added, awaiting administrator's approval")
else:
msg_record_created = T("Policy or Strategy added")
crud_strings[tablename] = Storage(
label_create = T("Create Policy or Strategy"),
title_display = T("Policy or Strategy"),
title_list = T("Policies & Strategies"),
title_update = T("Edit Policy or Strategy"),
title_upload = T("Import Policies & Strategies"),
label_list_button = T("List Policies & Strategies"),
msg_record_created = msg_record_created,
msg_record_modified = T("Policy or Strategy updated"),
msg_record_deleted = T("Policy or Strategy deleted"),
msg_list_empty = T("No Policies or Strategies found")
)
crud_form = S3SQLCustomForm(
"name",
S3SQLInlineComponent(
"framework_organisation",
label = ORGANISATIONS,
fields = ["organisation_id"],
),
"description",
"time_frame",
S3SQLInlineComponent(
"document",
label = T("Files"),
fields = ["file"],
filterby = dict(field = "file",
options = "",
invert = True,
)
),
)
#filter_widgets = [
# S3TextFilter(["name",
# "description",
# ],
# label = T("Name"),
# comment = T("Search for a Policy or Strategy by name or description."),
# ),
#]
self.configure(tablename,
super_entity="doc_entity",
crud_form = crud_form,
#filter_widgets = filter_widgets,
list_fields = ["name",
(ORGANISATIONS, "framework_organisation.organisation_id"),
"description",
"time_frame",
(T("Files"), "document.file"),
]
)
represent = S3Represent(lookup=tablename)
framework_id = S3ReusableField("framework_id", "reference %s" % tablename,
                                       label = T("Policy or Strategy"),
ondelete = "CASCADE",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "project_framework.id",
represent
)),
)
self.add_components(tablename,
project_framework_organisation = "framework_id",
)
# ---------------------------------------------------------------------
# Project Framework Organisations
#
tablename = "project_framework_organisation"
define_table(tablename,
framework_id(),
self.org_organisation_id(),
*s3_meta_fields()
)
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("New Organization"),
title_display = ORGANISATION,
title_list = T("Organizations"),
title_update = T("Edit Organization"),
label_list_button = T("List Organizations"),
msg_record_created = T("Organization added to Policy/Strategy"),
msg_record_modified = T("Organization updated"),
msg_record_deleted = T("Organization removed from Policy/Strategy"),
msg_list_empty = T("No Organizations found for this Policy/Strategy")
)
# Pass names back to global scope (s3.*)
return {}
# =============================================================================
class S3ProjectHazardModel(S3Model):
"""
Project Hazard Model
"""
names = ("project_hazard",
"project_hazard_project",
)
def model(self):
T = current.T
db = current.db
crud_strings = current.response.s3.crud_strings
define_table = self.define_table
NONE = current.messages["NONE"]
# ---------------------------------------------------------------------
# Hazard
#
tablename = "project_hazard"
define_table(tablename,
Field("name", length=128, notnull=True, unique=True,
label = T("Name"),
represent = lambda v: T(v) if v is not None \
else NONE,
),
s3_comments(
represent = lambda v: T(v) if v is not None \
else NONE,
),
*s3_meta_fields())
# CRUD Strings
ADD_HAZARD = T("Create Hazard")
crud_strings[tablename] = Storage(
label_create = ADD_HAZARD,
title_display = T("Hazard Details"),
title_list = T("Hazards"),
title_update = T("Edit Hazard"),
title_upload = T("Import Hazards"),
label_list_button = T("List Hazards"),
label_delete_button = T("Delete Hazard"),
msg_record_created = T("Hazard added"),
msg_record_modified = T("Hazard updated"),
msg_record_deleted = T("Hazard deleted"),
msg_list_empty = T("No Hazards currently registered"))
# Reusable Field
represent = S3Represent(lookup=tablename, translate=True)
hazard_id = S3ReusableField("hazard_id", "reference %s" % tablename,
sortby = "name",
label = T("Hazards"),
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "project_hazard.id",
represent,
sort=True)),
represent = represent,
ondelete = "CASCADE",
)
# ---------------------------------------------------------------------
# Projects <> Hazards Link Table
#
tablename = "project_hazard_project"
define_table(tablename,
hazard_id(),
self.project_project_id(),
*s3_meta_fields()
)
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("New Hazard"),
title_display = T("Hazard"),
title_list = T("Hazards"),
title_update = T("Edit Hazard"),
title_upload = T("Import Hazard data"),
label_list_button = T("List Hazards"),
msg_record_created = T("Hazard added to Project"),
msg_record_modified = T("Hazard updated"),
msg_record_deleted = T("Hazard removed from Project"),
msg_list_empty = T("No Hazards found for this Project"))
self.configure(tablename,
deduplicate = self.project_hazard_project_deduplicate,
)
# Pass names back to global scope (s3.*)
return {}
# -------------------------------------------------------------------------
@staticmethod
def project_hazard_project_deduplicate(item):
""" Import item de-duplication """
data = item.data
project_id = data.get("project_id")
hazard_id = data.get("hazard_id")
if project_id and hazard_id:
table = item.table
query = (table.project_id == project_id) & \
(table.hazard_id == hazard_id)
duplicate = current.db(query).select(table.id,
limitby=(0, 1)).first()
if duplicate:
item.id = duplicate.id
item.method = item.METHOD.UPDATE
# =============================================================================
class S3ProjectHRModel(S3Model):
"""
Optionally link Projects <> Human Resources
"""
names = ("project_human_resource",)
def model(self):
T = current.T
settings = current.deployment_settings
status_opts = {1: T("Assigned"),
#2: T("Standing By"),
#3: T("Active"),
4: T("Left"),
#5: T("Unable to activate"),
}
community_volunteers = settings.get_project_community_volunteers()
# ---------------------------------------------------------------------
# Projects <> Human Resources
#
tablename = "project_human_resource"
self.define_table(tablename,
# Instance table
self.super_link("cost_item_id", "budget_cost_item"),
self.project_project_id(empty = False,
ondelete = "CASCADE",
),
self.project_location_id(ondelete = "CASCADE",
readable = community_volunteers,
writable = community_volunteers,
),
self.hrm_human_resource_id(empty = False,
ondelete = "CASCADE",
),
Field("status", "integer",
default = 1,
represent = lambda opt: \
status_opts.get(opt, current.messages.UNKNOWN_OPT),
requires = IS_IN_SET(status_opts),
),
*s3_meta_fields()
)
current.response.s3.crud_strings[tablename] = Storage(
label_create = T("Assign Human Resource"),
title_display = T("Human Resource Details"),
title_list = T("Assigned Human Resources"),
title_update = T("Edit Human Resource"),
label_list_button = T("List Assigned Human Resources"),
label_delete_button = T("Remove Human Resource from this project"),
msg_record_created = T("Human Resource assigned"),
msg_record_modified = T("Human Resource Assignment updated"),
msg_record_deleted = T("Human Resource unassigned"),
msg_list_empty = T("No Human Resources currently assigned to this project"))
if settings.has_module("budget"):
crud_form = S3SQLCustomForm("project_id",
"human_resource_id",
"status",
S3SQLInlineComponent("allocation",
label = T("Budget"),
fields = ["budget_id",
"start_date",
"end_date",
"daily_cost",
],
),
)
else:
crud_form = None
self.configure(tablename,
crud_form = crud_form,
list_fields = [#"project_id", # Not being dropped in component view
"human_resource_id",
"status",
"allocation.budget_id",
"allocation.start_date",
"allocation.end_date",
"allocation.daily_cost",
],
onvalidation = self.project_human_resource_onvalidation,
super_entity = "budget_cost_item",
)
# Pass names back to global scope (s3.*)
return {}
# -------------------------------------------------------------------------
@staticmethod
def project_human_resource_onvalidation(form):
"""
Prevent the same hrm_human_resource record being added more than
once.
"""
# The project human resource table
hr = current.s3db.project_human_resource
# Fetch the first row that has the same project and human resource ids
query = (hr.human_resource_id == form.vars.human_resource_id) & \
(hr.project_id == form.request_vars.project_id)
row = current.db(query).select(hr.id,
limitby=(0, 1)).first()
# If we found a row we have a duplicate. Return an error to the user.
if row:
form.errors.human_resource_id = current.T("Record already exists")
return
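        # Behaviour sketch (illustration): submitting the assignment form a
        # second time with the same project_id and human_resource_id places
        # "Record already exists" in form.errors.human_resource_id, so
        # validation fails and no duplicate link record is created.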
# =============================================================================
class S3ProjectIndicatorModel(S3Model):
"""
Project Indicator Model
- depends on Stats module
        Unused - use ProjectPlanningModel instead, since Indicators are not reused across Projects
"""
names = ("project_indicator",
"project_indicator_data",
)
def model(self):
if not current.deployment_settings.has_module("stats"):
current.log.warning("Project Indicator Model needs Stats module enabling")
return {}
T = current.T
db = current.db
s3 = current.response.s3
settings = current.deployment_settings
configure = self.configure
crud_strings = s3.crud_strings
define_table = self.define_table
super_link = self.super_link
# ---------------------------------------------------------------------
# Project Indicator
#
tablename = "project_indicator"
define_table(tablename,
super_link("parameter_id", "stats_parameter"),
Field("name", length=128, unique=True,
label = T("Name"),
represent = lambda v: T(v) if v is not None \
else NONE,
requires = IS_NOT_IN_DB(db,
"project_indicator.name"),
),
s3_comments("description",
label = T("Description"),
),
*s3_meta_fields())
# CRUD Strings
ADD_INDICATOR = T("Create Indicator")
crud_strings[tablename] = Storage(
label_create = ADD_INDICATOR,
title_display = T("Indicator"),
title_list = T("Indicators"),
title_update = T("Edit Indicator"),
label_list_button = T("List Indicators"),
msg_record_created = T("Indicator Added"),
msg_record_modified = T("Indicator Updated"),
msg_record_deleted = T("Indicator Deleted"),
msg_list_empty = T("No Indicators Found")
)
# Resource Configuration
configure(tablename,
super_entity = "stats_parameter",
)
# ---------------------------------------------------------------------
# Project Indicator Data
#
tablename = "project_indicator_data"
define_table(tablename,
# Instance
super_link("data_id", "stats_data"),
self.project_project_id(),
# This is a component, so needs to be a super_link
# - can't override field name, ondelete or requires
super_link("parameter_id", "stats_parameter",
empty = False,
instance_types = ("project_indicator",),
label = T("Indicator"),
represent = S3Represent(lookup="stats_parameter",
translate=True,
),
readable = True,
writable = True,
comment = S3AddResourceLink(c="project",
f="indicator",
vars = dict(child="parameter_id"),
title=ADD_INDICATOR),
),
#self.gis_location_id(),
s3_date(empty = False,
#label = T("Start Date"),
),
#s3_date("end_date",
# #empty = False,
# label = T("End Date"),
# start_field = "project_indicator_data_date",
# default_interval = 12,
# ),
#Field("year", "list:integer",
# compute = lambda row: \
# self.stats_year(row, "project_indicator_data"),
# label = T("Year"),
# ),
Field("target_value", "integer",
label = T("Target Value"),
represent = lambda v: IS_INT_AMOUNT.represent(v),
requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 99999999)),
),
Field("value", "integer",
label = T("Actual Value"),
represent = lambda v: IS_INT_AMOUNT.represent(v),
requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 99999999)),
),
# Link to Source
#self.stats_source_id(),
s3_comments(),
*s3_meta_fields())
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Add Indicator Data"),
title_display = T("Indicator Data Details"),
title_list = T("Indicator Data"),
title_update = T("Edit Indicator Data"),
title_report = T("Indicators Report"),
label_list_button = T("List Indicator Data"),
msg_record_created = T("Indicator Data Added"),
msg_record_modified = T("Indicator Data Updated"),
msg_record_deleted = T("Indicator Data Deleted"),
msg_list_empty = T("No Indicator Data Found")
)
# Model options
programmes = settings.get_project_programmes()
sectors = settings.get_project_sectors()
hazards = settings.get_project_hazards()
themes = settings.get_project_themes()
programme_id = "project_id$programme_project.programme_id"
sector_id = "project_id$sector_project.sector_id"
hazard_id = "project_id$hazard_project.hazard_id"
theme_id = "project_id$theme_project.theme_id"
# Which levels of location hierarchy are we using?
#levels = current.gis.get_relevant_hierarchy_levels()
# Filter Widgets
filter_widgets = [
S3TextFilter(["project_id$name",
"project_id$code",
"project_id$description",
"project_id$organisation.name",
"project_id$organisation.acronym",
],
label = T("Search"),
_class = "filter-search",
),
S3OptionsFilter("project_id",
#hidden = True,
),
S3OptionsFilter("parameter_id",
label = T("Indicator"),
#hidden = True,
),
#S3OptionsFilter("year",
# operator = "anyof",
# options = lambda: \
# self.stats_year_options("project_indicator_data"),
# hidden = True,
# ),
#S3LocationFilter("location_id",
# levels = levels,
# #hidden = True,
# ),
]
if programmes:
filter_widgets.insert(0, S3OptionsFilter(programme_id))
if sectors:
filter_widgets.insert(0, S3OptionsFilter(sector_id))
if themes:
filter_widgets.append(S3OptionsFilter(theme_id))
# List fields
list_fields = ["project_id",
(T("Indicator"), "parameter_id"),
"value",
"target_value",
"date",
#"year",
]
        if programmes:
            list_fields.insert(0, programme_id)
# Report axes
report_fields = [(T("Indicator"), "parameter_id"),
"project_id",
#"project_location_id",
"date",
#"year",
]
add_report_field = report_fields.append
if programmes:
add_report_field(programme_id)
if sectors:
add_report_field(sector_id)
if hazards:
add_report_field(hazard_id)
if themes:
add_report_field(theme_id)
# Location levels (append to list fields and report axes)
#for level in levels:
# lfield = "location_id$%s" % level
# list_fields.append(lfield)
# add_report_field(lfield)
#if "L0" in levels:
# default_row = "location_id$L0"
#elif "L1" in levels:
# default_row = "location_id$L1"
#else:
default_row = "project_id"
# Report options and defaults
report_options = Storage(rows = report_fields,
cols = report_fields,
fact = [(T("Value of Indicator"),
"sum(value)",
),
(T("Target Value of Indicator"),
"sum(target_value)",
),
],
defaults = Storage(rows=default_row,
cols="parameter_id",
fact="sum(value)",
totals=True
),
)
# Resource configuration
configure(tablename,
deduplicate = self.project_indicator_data_deduplicate,
filter_widgets = filter_widgets,
list_fields = list_fields,
report_options = report_options,
super_entity = "stats_data",
)
# Pass names back to global scope (s3.*)
return {}
# ---------------------------------------------------------------------
@staticmethod
def project_indicator_data_deduplicate(item):
""" Import item de-duplication """
data = item.data
parameter_id = data.get("parameter_id")
project_id = data.get("project_id")
start_date = data.get("date")
# Match indicator_data by indicator, project and date
if parameter_id and project_id and start_date:
table = item.table
query = (table.parameter_id == parameter_id) & \
(table.project_id == project_id) & \
(table.date == start_date)
duplicate = current.db(query).select(table.id,
limitby=(0, 1)).first()
if duplicate:
item.id = duplicate.id
item.method = item.METHOD.UPDATE
# =============================================================================
class S3ProjectLocationModel(S3Model):
"""
Project Location Model
        - these can simply be ways to display a Project on the Map,
          or they can be 'Communities'
"""
names = ("project_location",
"project_location_id",
"project_location_contact",
"project_location_represent",
)
def model(self):
T = current.T
db = current.db
s3 = current.response.s3
settings = current.deployment_settings
community = settings.get_project_community()
mode_3w = settings.get_project_mode_3w()
messages = current.messages
NONE = messages["NONE"]
COUNTRY = messages.COUNTRY
add_components = self.add_components
configure = self.configure
crud_strings = s3.crud_strings
define_table = self.define_table
# Which levels of Hierarchy are we using?
levels = current.gis.get_relevant_hierarchy_levels()
# ---------------------------------------------------------------------
# Project Location ('Community')
#
tablename = "project_location"
define_table(tablename,
self.super_link("doc_id", "doc_entity"),
# Populated onaccept - used for map popups
Field("name",
writable = False,
),
self.project_project_id(),
# Enable in templates which desire this:
self.project_status_id(readable = False,
writable = False,
),
self.gis_location_id(
represent = self.gis_LocationRepresent(sep=", "),
requires = IS_LOCATION(),
widget = S3LocationAutocompleteWidget(),
comment = S3AddResourceLink(c="gis",
f="location",
label = T("Create Location"),
title=T("Location"),
tooltip=messages.AUTOCOMPLETE_HELP),
),
# % breakdown by location
Field("percentage", "decimal(3,2)",
comment = T("Amount of the Project Budget spent at this location"),
default = 0,
label = T("Percentage"),
readable = mode_3w,
requires = IS_DECIMAL_IN_RANGE(0, 1),
writable = mode_3w,
),
s3_comments(),
*s3_meta_fields())
# CRUD Strings
if community:
LOCATION = T("Community")
LOCATION_TOOLTIP = T("If you don't see the community in the list, you can add a new one by clicking link 'Create Community'.")
ADD_LOCATION = T("Create Community")
crud_strings[tablename] = Storage(
label_create = ADD_LOCATION,
title_display = T("Community Details"),
title_list = T("Communities"),
title_update = T("Edit Community Details"),
title_upload = T("Import Community Data"),
title_report = T("3W Report"),
title_map = T("Map of Communities"),
label_list_button = T("List Communities"),
msg_record_created = T("Community Added"),
msg_record_modified = T("Community Updated"),
msg_record_deleted = T("Community Deleted"),
msg_list_empty = T("No Communities Found")
)
else:
LOCATION = T("Location")
LOCATION_TOOLTIP = T("If you don't see the location in the list, you can add a new one by clicking link 'Create Location'.")
ADD_LOCATION = T("Create Location")
crud_strings[tablename] = Storage(
label_create = ADD_LOCATION,
title_display = T("Location Details"),
title_list = T("Locations"),
title_update = T("Edit Location Details"),
title_upload = T("Import Location Data"),
title_report = T("3W Report"),
title_map = T("Map of Projects"),
label_list_button = T("List Locations"),
msg_record_created = T("Location Added"),
msg_record_modified = T("Location updated"),
msg_record_deleted = T("Location Deleted"),
msg_list_empty = T("No Locations Found")
)
# Fields to search by Text
text_fields = []
tappend = text_fields.append
# List fields
list_fields = ["location_id",
]
lappend = list_fields.append
# Report options
report_fields = []
rappend = report_fields.append
for level in levels:
loc_field = "location_id$%s" % level
lappend(loc_field)
rappend(loc_field)
tappend(loc_field)
lappend("project_id")
if settings.get_project_theme_percentages():
lappend((T("Themes"), "project_id$theme_project.theme_id"))
else:
lappend((T("Activity Types"), "activity_type.name"))
lappend("comments")
# Filter widgets
if community:
filter_widgets = [
S3TextFilter(text_fields,
label = T("Name"),
comment = T("Search for a Project Community by name."),
)
]
else:
text_fields.extend(("project_id$name",
"project_id$code",
"project_id$description",
))
filter_widgets = [
S3TextFilter(text_fields,
label = T("Text"),
comment = T("Search for a Project by name, code, location, or description."),
)
]
if settings.get_project_sectors():
filter_widgets.append(S3OptionsFilter("project_id$sector.name",
label = T("Sector"),
hidden = True,
))
if settings.get_project_programmes():
programme_id = "project_id$programme_project.programme_id"
filter_widgets.append(S3OptionsFilter(programme_id,
hidden=True,
))
rappend((T("Program"), programme_id))
filter_widgets.extend((
# This is only suitable for deployments with a few projects
S3OptionsFilter("project_id",
label = T("Project"),
hidden = True,
),
S3OptionsFilter("project_id$theme_project.theme_id",
label = T("Theme"),
options = lambda: \
get_s3_filter_opts("project_theme",
translate=True),
hidden = True,
),
S3LocationFilter("location_id",
levels = levels,
hidden = True,
),
))
report_fields.extend(((messages.ORGANISATION, "project_id$organisation_id"),
(T("Project"), "project_id"),
(T("Activity Types"), "activity_type.activity_type_id"),
))
# Report options and default
report_options = Storage(rows=report_fields,
cols=report_fields,
fact=report_fields,
defaults=Storage(rows="location_id$%s" % levels[0], # Highest-level of Hierarchy
cols="project_id",
fact="list(activity_type.activity_type_id)",
totals=True,
),
)
# Resource Configuration
configure(tablename,
context = {"project": "project_id",
},
create_next = URL(c="project", f="location",
args=["[id]", "beneficiary"]),
deduplicate = self.project_location_deduplicate,
filter_widgets = filter_widgets,
list_fields = list_fields,
onaccept = self.project_location_onaccept,
report_options = report_options,
super_entity = "doc_entity",
)
# Components
add_components(tablename,
# Activity Types
project_activity_type = {"link": "project_activity_type_location",
"joinby": "project_location_id",
"key": "activity_type_id",
"actuate": "hide",
},
# Beneficiaries
project_beneficiary = "project_location_id",
# Contacts
pr_person = {"name": "contact",
"link": "project_location_contact",
"joinby": "project_location_id",
"key": "person_id",
"actuate": "hide",
"autodelete": False,
},
# Distributions (not implemented yet)
#supply_distribution = "project_location_id",
# Themes
project_theme = {"link": "project_theme_location",
"joinby": "project_location_id",
"key": "theme_id",
"actuate": "hide",
},
)
# Reusable Field
project_location_represent = project_LocationRepresent()
project_location_id = S3ReusableField("project_location_id", "reference %s" % tablename,
label = LOCATION,
ondelete = "CASCADE",
represent = project_location_represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "project_location.id",
project_location_represent,
updateable = True,
sort=True)),
comment = S3AddResourceLink(ADD_LOCATION,
c="project", f="location",
tooltip=LOCATION_TOOLTIP),
)
# ---------------------------------------------------------------------
# Project Community Contact Person
#
tablename = "project_location_contact"
define_table(tablename,
project_location_id(),
self.pr_person_id(
comment = None,
requires = IS_ADD_PERSON_WIDGET2(),
widget = S3AddPersonWidget2(controller="pr"),
),
*s3_meta_fields())
# CRUD Strings
LIST_OF_CONTACTS = T("Community Contacts")
crud_strings[tablename] = Storage(
label_create = T("Add Contact"), # Better language for 'Select or Create'
title_display = T("Contact Details"),
title_list = T("Contacts"),
title_update = T("Edit Contact Details"),
label_list_button = T("List Contacts"),
msg_record_created = T("Contact Added"),
msg_record_modified = T("Contact Updated"),
msg_record_deleted = T("Contact Deleted"),
msg_list_empty = T("No Contacts Found"))
# Filter Widgets
filter_widgets = [
S3TextFilter(["person_id$first_name",
"person_id$middle_name",
"person_id$last_name"
],
label = T("Name"),
comment = T("You can search by person name - enter any of the first, middle or last names, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons."),
),
S3LocationFilter("project_location_id$location_id",
levels = levels,
hidden = True,
),
]
# Resource configuration
configure(tablename,
filter_widgets = filter_widgets,
list_fields = ["person_id",
(T("Email"), "email.value"),
(T("Mobile Phone"), "phone.value"),
"project_location_id",
(T("Project"), "project_location_id$project_id"),
],
)
# Components
add_components(tablename,
# Contact Information
pr_contact = (# Email
{"name": "email",
"link": "pr_person",
"joinby": "id",
"key": "pe_id",
"fkey": "pe_id",
"pkey": "person_id",
"filterby": "contact_method",
"filterfor": ("EMAIL",),
},
# Mobile Phone
{"name": "phone",
"link": "pr_person",
"joinby": "id",
"key": "pe_id",
"fkey": "pe_id",
"pkey": "person_id",
"filterby": "contact_method",
"filterfor": ("SMS",),
},
),
)
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return dict(project_location_id = project_location_id,
project_location_represent = project_location_represent,
)
# -------------------------------------------------------------------------
@staticmethod
def defaults():
""" Safe defaults for model-global names if module is disabled """
        dummy = S3ReusableField("dummy_id", "integer",
                                readable = False,
                                writable = False)
return dict(project_location_id = lambda **attr: dummy("project_location_id"),
project_location_represent = lambda v, row=None: "",
)
# -------------------------------------------------------------------------
@staticmethod
def project_location_onaccept(form):
"""
Calculate the 'name' field used by Map popups
"""
vars = form.vars
id = vars.id
if vars.location_id and vars.project_id:
name = current.s3db.project_location_represent(None, vars)
elif id:
name = current.s3db.project_location_represent(id)
else:
return None
if len(name) > 512:
# Ensure we don't break limits of SQL field
name = name[:509] + "..."
db = current.db
db(db.project_location.id == id).update(name=name)
# -------------------------------------------------------------------------
@staticmethod
def project_location_deduplicate(item):
""" Import item de-duplication """
data = item.data
project_id = data.get("project_id")
location_id = data.get("location_id")
if project_id and location_id:
table = item.table
query = (table.project_id == project_id) & \
(table.location_id == location_id)
duplicate = current.db(query).select(table.id,
limitby=(0, 1)).first()
if duplicate:
item.id = duplicate.id
item.method = item.METHOD.UPDATE
# =============================================================================
class S3ProjectOrganisationModel(S3Model):
"""
Project Organisation Model
"""
names = ("project_organisation",)
def model(self):
T = current.T
settings = current.deployment_settings
messages = current.messages
NONE = messages["NONE"]
# ---------------------------------------------------------------------
# Project Organisations
# for multi_orgs=True
#
project_organisation_roles = settings.get_project_organisation_roles()
organisation_help = T("Add all organizations which are involved in different roles in this project")
tablename = "project_organisation"
self.define_table(tablename,
self.project_project_id(
comment=S3AddResourceLink(c="project",
f="project",
vars = dict(prefix="project"),
tooltip=T("If you don't see the project in the list, you can add a new one by clicking link 'Create Project'."),
)
),
self.org_organisation_id(
requires = self.org_organisation_requires(
required=True,
# Need to be able to add Partners/Donors not just Lead org
#updateable=True,
),
widget = None,
comment=S3AddResourceLink(c="org",
f="organisation",
label=T("Create Organization"),
title=messages.ORGANISATION,
tooltip=organisation_help)
),
Field("role", "integer",
label = T("Role"),
requires = IS_EMPTY_OR(
IS_IN_SET(project_organisation_roles)
),
represent = lambda opt: \
project_organisation_roles.get(opt,
NONE)),
Field("amount", "double",
requires = IS_EMPTY_OR(
IS_FLOAT_AMOUNT()),
represent = lambda v: \
IS_FLOAT_AMOUNT.represent(v, precision=2),
widget = IS_FLOAT_AMOUNT.widget,
label = T("Funds Contributed")),
s3_currency(),
s3_comments(),
*s3_meta_fields())
# CRUD Strings
current.response.s3.crud_strings[tablename] = Storage(
label_create = T("Add Organization to Project"),
title_display = T("Project Organization Details"),
title_list = T("Project Organizations"),
title_update = T("Edit Project Organization"),
title_upload = T("Import Project Organizations"),
title_report = T("Funding Report"),
label_list_button = T("List Project Organizations"),
label_delete_button = T("Remove Organization from Project"),
msg_record_created = T("Organization added to Project"),
msg_record_modified = T("Project Organization updated"),
msg_record_deleted = T("Organization removed from Project"),
msg_list_empty = T("No Organizations for Project(s)"))
# Report Options
report_fields = ["project_id",
"organisation_id",
"role",
"amount",
"currency",
]
if settings.get_project_programmes():
report_fields.insert(0, "project_id$programme_project.programme_id")
report_options = Storage(rows = report_fields,
cols = report_fields,
fact = report_fields,
defaults = Storage(rows = "organisation_id",
cols = "currency",
fact = "sum(amount)",
totals = False
)
)
# Resource Configuration
self.configure(tablename,
context = {"project": "project_id",
},
deduplicate = self.project_organisation_deduplicate,
onaccept = self.project_organisation_onaccept,
ondelete = self.project_organisation_ondelete,
onvalidation = self.project_organisation_onvalidation,
report_options = report_options,
)
# Pass names back to global scope (s3.*)
return {}
# -------------------------------------------------------------------------
@staticmethod
def project_organisation_onvalidation(form, lead_role=None):
""" Form validation """
if lead_role is None:
lead_role = current.deployment_settings.get_project_organisation_lead_role()
vars = form.vars
project_id = vars.project_id
organisation_id = vars.organisation_id
if str(vars.role) == str(lead_role) and project_id:
db = current.db
otable = db.project_organisation
query = (otable.deleted != True) & \
(otable.project_id == project_id) & \
(otable.role == lead_role) & \
(otable.organisation_id != organisation_id)
row = db(query).select(otable.id,
limitby=(0, 1)).first()
if row:
form.errors.role = \
current.T("Lead Implementer for this project is already set, please choose another role.")
# -------------------------------------------------------------------------
@staticmethod
def project_organisation_onaccept(form):
"""
Record creation post-processing
If the added organisation is the lead role, set the
project.organisation to point to the same organisation
& update the realm_entity.
"""
vars = form.vars
if str(vars.role) == \
str(current.deployment_settings.get_project_organisation_lead_role()):
# Read the record
# (safer than relying on vars which might be missing on component tabs)
db = current.db
ltable = db.project_organisation
record = db(ltable.id == vars.id).select(ltable.project_id,
ltable.organisation_id,
limitby=(0, 1)
).first()
# Set the Project's organisation_id to the new lead organisation
organisation_id = record.organisation_id
ptable = db.project_project
db(ptable.id == record.project_id).update(
organisation_id = organisation_id,
realm_entity = \
current.s3db.pr_get_pe_id("org_organisation",
organisation_id)
)
# -------------------------------------------------------------------------
@staticmethod
def project_organisation_ondelete(row):
"""
Executed when a project organisation record is deleted.
If the deleted organisation is the lead role on this project,
set the project organisation to None.
"""
db = current.db
potable = db.project_organisation
ptable = db.project_project
query = (potable.id == row.get("id"))
deleted_row = db(query).select(potable.deleted_fk,
potable.role,
limitby=(0, 1)).first()
if str(deleted_row.role) == \
str(current.deployment_settings.get_project_organisation_lead_role()):
# Get the project_id
deleted_fk = json.loads(deleted_row.deleted_fk)
project_id = deleted_fk["project_id"]
# Set the project organisation_id to NULL (using None)
db(ptable.id == project_id).update(organisation_id=None)
# ---------------------------------------------------------------------
@staticmethod
def project_organisation_deduplicate(item):
""" Import item de-duplication """
data = item.data
project_id = data.get("project_id")
organisation_id = data.get("organisation_id")
if project_id and organisation_id:
table = item.table
query = (table.project_id == project_id) & \
(table.organisation_id == organisation_id)
duplicate = current.db(query).select(table.id,
limitby=(0, 1)).first()
if duplicate:
item.id = duplicate.id
item.method = item.METHOD.UPDATE
# =============================================================================
class S3ProjectPlanningModel(S3Model):
"""
Project Planning Model
Goals (Objectives)
Outcomes
Outputs
Indicators
This module currently assumes discrete values for each period
@ToDo: deployment_setting to use cumulative?
"""
names = ("project_goal",
#"project_goal_id",
"project_goal_represent",
"project_outcome",
#"project_outcome_id",
"project_outcome_represent",
"project_output",
#"project_output_id",
"project_output_represent",
"project_indicator",
#"project_indicator_id",
"project_indicator_represent",
"project_indicator_data",
)
def model(self):
T = current.T
db = current.db
settings = current.deployment_settings
NONE = current.messages["NONE"]
configure = self.configure
crud_strings = current.response.s3.crud_strings
define_table = self.define_table
use_goals = settings.get_project_goals()
use_outcomes = settings.get_project_outcomes()
use_outputs = settings.get_project_outputs()
inline = use_outputs == "inline"
project_id = self.project_project_id
# ---------------------------------------------------------------------
# Goals / Objectives
#
tablename = "project_goal"
define_table(tablename,
project_id(),
Field("code",
label = T("Code"),
represent = lambda v: v or NONE,
),
Field("name", "text",
label = T("Description"),
represent = lambda v: v or NONE,
widget = s3_comments_widget,
),
Field("weighting", "float",
default = 0.0,
label = T("Weighting"),
requires = IS_FLOAT_IN_RANGE(0, 1),
),
# Used to highlight cells with issues?
#Field("weightings_ok", "boolean",
# default = True,
# readable = False,
# writable = False,
# ),
Field("current_status", "float",
label = T("Current Status"),
represent = project_status_represent,
# Calculated onaccept of Indicator Data
writable = False,
),
Field("overall_status", "float",
label = T("Overall Status"),
represent = project_status_represent,
# Calculated onaccept of Indicator Data
writable = False,
),
*s3_meta_fields())
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Add Goal"),
title_display = T("Goal"),
title_list = T("Goals"),
title_update = T("Edit Goal"),
label_list_button = T("List Goals"),
msg_record_created = T("Goal added"),
msg_record_modified = T("Goal updated"),
msg_record_deleted = T("Goal deleted"),
msg_list_empty = T("No goals defined")
)
configure(tablename,
create_onaccept = self.project_goal_create_onaccept,
deduplicate = self.project_goal_deduplicate,
list_fields = ["code",
"name",
"weighting",
"current_status",
"overall_status",
],
onaccept = self.project_goal_onaccept,
)
# Reusable Field
goal_represent = S3Represent(lookup=tablename, fields=("code", "name"))
goal_id = S3ReusableField("goal_id", "reference %s" % tablename,
label = T("Goal"),
ondelete = "CASCADE",
represent = goal_represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "project_goal.id",
goal_represent,
sort = True,
)
),
sortby = "name",
#comment = S3AddResourceLink(c="project", f="goal"),
)
# ---------------------------------------------------------------------
# Outcomes
#
tablename = "project_outcome"
define_table(tablename,
project_id(),
goal_id(readable = use_goals,
writable = use_goals,
),
Field("code",
label = T("Code"),
represent = lambda v: v or NONE,
),
Field("name", "text",
label = T("Description"),
represent = lambda v: v or NONE,
widget = s3_comments_widget,
),
Field("weighting", "float",
default = 0.0,
label = T("Weighting"),
requires = IS_FLOAT_IN_RANGE(0, 1),
),
Field("current_status", "float",
label = T("Current Status"),
represent = project_status_represent,
# Calculated onaccept of Indicator Data
writable = False,
),
Field("overall_status", "float",
label = T("Overall Status"),
represent = project_status_represent,
# Calculated onaccept of Indicator Data
writable = False,
),
*s3_meta_fields())
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Add Outcome"),
title_display = T("Outcome"),
title_list = T("Outcomes"),
title_update = T("Edit Outcome"),
label_list_button = T("List Outcomes"),
msg_record_created = T("Outcome added"),
msg_record_modified = T("Outcome updated"),
msg_record_deleted = T("Outcome deleted"),
msg_list_empty = T("No outcomes defined")
)
configure(tablename,
create_onaccept = self.project_outcome_create_onaccept,
deduplicate = self.project_outcome_deduplicate,
list_fields = ["goal_id",
"code",
"name",
"weighting",
"current_status",
"overall_status",
],
onaccept = self.project_outcome_onaccept,
)
# Reusable Field
outcome_represent = S3Represent(lookup=tablename, fields=("code", "name"))
outcome_id = S3ReusableField("outcome_id", "reference %s" % tablename,
label = T("Outcome"),
ondelete = "CASCADE",
represent = outcome_represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "project_outcome.id",
outcome_represent,
sort = True,
)
),
sortby = "name",
#comment = S3AddResourceLink(c="project", f="outcome"),
)
# ---------------------------------------------------------------------
# Outputs
#
tablename = "project_output"
define_table(tablename,
project_id(
# Override requires so that update access to the projects isn't required
requires = IS_ONE_OF(db, "project_project.id",
self.project_project_represent
)
),
goal_id(readable = use_goals and not use_outcomes,
writable = use_goals and not use_outcomes,
),
outcome_id(readable = use_outcomes,
writable = use_outcomes,
),
Field("code",
label = T("Code"),
represent = lambda v: v or NONE,
readable = not inline,
writable = not inline,
),
Field("name", "text",
label = T("Output") if inline else T("Description"),
represent = lambda v: v or NONE,
widget = s3_comments_widget,
),
Field("weighting", "float",
default = 0.0,
label = T("Weighting"),
requires = IS_FLOAT_IN_RANGE(0, 1),
),
Field("current_status", "float",
label = T("Current Status"),
represent = project_status_represent,
# Calculated onaccept of Indicator Data
writable = False,
),
Field("overall_status", "float",
label = T("Overall Status"),
represent = project_status_represent,
# Calculated onaccept of Indicator Data
writable = False,
),
# Legacy field from DRRPP
Field("status",
label = T("Status"),
represent = lambda v: v or NONE,
readable = inline,
writable = inline,
),
*s3_meta_fields())
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Add Output"),
title_display = T("Output"),
title_list = T("Outputs"),
title_update = T("Edit Output"),
label_list_button = T("List Outputs"),
msg_record_created = T("Output added"),
msg_record_modified = T("Output updated"),
msg_record_deleted = T("Output deleted"),
msg_list_empty = T("No outputs defined")
)
configure(tablename,
create_onaccept = self.project_output_create_onaccept if not inline else None,
deduplicate = self.project_output_deduplicate,
list_fields = ["outcome_id",
"code",
"name",
"weighting",
"current_status",
"overall_status",
],
onaccept = self.project_output_onaccept if not inline else None,
)
# Reusable Field
output_represent = S3Represent(lookup=tablename, fields=("code", "name"))
output_id = S3ReusableField("output_id", "reference %s" % tablename,
label = T("Output"),
ondelete = "CASCADE",
represent = output_represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "project_output.id",
output_represent,
sort = True,
)
),
sortby = "name",
#comment = S3AddResourceLink(c="project", f="output"),
)
# ---------------------------------------------------------------------
# Indicators
#
tablename = "project_indicator"
define_table(tablename,
project_id(),
goal_id(readable = use_goals and not use_outcomes and not use_outputs,
writable = use_goals and not use_outcomes and not use_outputs,
),
outcome_id(readable = use_outcomes and not use_outputs,
writable = use_outcomes and not use_outputs,
),
output_id(readable = use_outputs,
writable = use_outputs,
),
Field("code",
label = T("Code"),
represent = lambda v: v or NONE,
),
Field("name", "text",
label = T("Description"),
represent = lambda v: v or NONE,
widget = s3_comments_widget,
),
Field("weighting", "float",
default = 0.0,
label = T("Weighting"),
requires = IS_FLOAT_IN_RANGE(0, 1),
),
Field("current_status", "float",
label = T("Current Status"),
represent = project_status_represent,
# Calculated onaccept of Indicator Data
writable = False,
),
Field("overall_status", "float",
label = T("Overall Status"),
represent = project_status_represent,
# Calculated onaccept of Indicator Data
writable = False,
),
*s3_meta_fields())
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Add Indicator"),
title_display = T("Indicator"),
title_list = T("Indicators"),
title_update = T("Edit Indicator"),
label_list_button = T("List Indicators"),
msg_record_created = T("Indicator added"),
msg_record_modified = T("Indicator updated"),
msg_record_deleted = T("Indicator deleted"),
msg_list_empty = T("No indicators defined")
)
configure(tablename,
create_onaccept = self.project_indicator_create_onaccept,
deduplicate = self.project_indicator_deduplicate,
list_fields = ["output_id",
"code",
"name",
"weighting",
"current_status",
"overall_status",
],
onaccept = self.project_indicator_onaccept,
orderby = "project_indicator.output_id",
)
# Reusable Field
# @ToDo: deployment_setting as to whether to show hierarchy or not
# (HNRC add the hierarchy manually in codes, so no need for them)
indicator_represent = S3Represent(lookup=tablename, fields=("code", "name"))
indicator_id = S3ReusableField("indicator_id", "reference %s" % tablename,
label = T("Indicator"),
ondelete = "CASCADE",
represent = indicator_represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "project_indicator.id",
indicator_represent,
sort = True,
)
),
sortby = "name",
#comment = S3AddResourceLink(c="project", f="indicator"),
)
# ---------------------------------------------------------------------
# Indicator Data
#
tablename = "project_indicator_data"
define_table(tablename,
project_id(
# Override requires so that update access to the projects isn't required
requires = IS_ONE_OF(db, "project_project.id",
self.project_project_represent
)
),
indicator_id(),
# Populated Automatically
                     # Used for Timeplot and, in future, to ease changing the monitoring frequency
s3_date("start_date",
readable = False,
writable = False,
),
s3_date("end_date",
empty = False,
label = T("Date"),
),
Field("target_value", "integer",
label = T("Target Value"),
represent = lambda v: IS_INT_AMOUNT.represent(v),
requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 99999999)),
),
Field("value", "integer",
label = T("Actual Value"),
represent = lambda v: IS_INT_AMOUNT.represent(v),
requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 99999999)),
),
Field.Method("percentage", self.project_indicator_percentage),
s3_comments(),
*s3_meta_fields())
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Add Indicator Data"),
title_display = T("Indicator Data"),
title_list = T("Indicator Data"),
title_update = T("Edit Indicator Data"),
label_list_button = T("List Indicator Data"),
msg_record_created = T("Indicator Data added"),
msg_record_modified = T("Indicator Data updated"),
msg_record_deleted = T("Indicator Data removed"),
msg_list_empty = T("No indicator data defined")
)
report_options = {"rows": ["indicator_id", "end_date"],
"cols": ["indicator_id", "end_date"],
"fact": [(T("Target Value"), "avg(target_value)"),
(T("Actual Value"), "avg(value)"),
                                   # Not working: the percentage Method returns a
                                   # string, not a number, so no average can be
                                   # calculated; list(percentage) is used instead.
#(T("Percentage"), "avg(percentage)"),
(T("Percentage"), "list(percentage)"),
(T("Comparison"), [(T("Actual Value"), "avg(value)"),
(T("Target Value"), "avg(target_value)"),
],
),
],
"defaults": {"rows": "indicator_id",
"cols": "end_date",
#"fact": "avg(percentage)",
"fact": "avg(value)",
"totals": False,
},
}
self.configure(tablename,
list_fields = ["indicator_id",
"end_date",
"target_value",
"value",
(T("Percentage"), "percentage"),
"comments",
],
onaccept = self.project_indicator_data_onaccept,
report_options = report_options,
)
# Pass names back to global scope (s3.*)
return dict(#project_goal_id = goal_id,
project_goal_represent = goal_represent,
#project_outcome_id = outcome_id,
project_outcome_represent = outcome_represent,
#project_output_id = output_id,
project_output_represent = output_represent,
#project_indicator_id = indicator_id,
project_indicator_represent = indicator_represent,
)
# -------------------------------------------------------------------------
@staticmethod
def project_planning_status_update(project_id):
"""
Update the status fields of the different Project levels
Fired onaccept of:
project_indicator_data
project_indicator (weightings may have changed)
project_output (weightings may have changed)
project_outcome (weightings may have changed)
project_goal (weightings may have changed)
@ToDo: Handle deployment_settings for which levels are exposed
"""
db = current.db
s3db = current.s3db
project = None
goals = {}
outcomes = {}
outputs = {}
indicators = {}
# Read all of the Indicator Data for this Project
table = s3db.project_indicator_data
query = (table.project_id == project_id) & \
(table.deleted == False)
indicator_data = db(query).select(table.indicator_id,
table.target_value,
table.value,
table.end_date,
)
for d in indicator_data:
indicator_id = d.indicator_id
target_value = d.target_value
value = d.value
end_date = d.end_date
if indicator_id not in indicators:
indicators[indicator_id] = {"total_target": target_value,
"total_value": value,
"current_target": target_value,
"current_value": value,
"current_date": end_date,
}
else:
# Add this data to Totals
i = indicators[indicator_id]
i["total_target"] = i["total_target"] + target_value
i["total_value"] = i["total_value"] + value
if end_date > i["current_date"]:
# Replace the Current data
i.update(current_target = target_value,
current_value = value,
current_date = end_date)
# Read all of the Indicators for this Project
table = s3db.project_indicator
query = (table.project_id == project_id) & \
(table.deleted == False)
rows = db(query).select(table.id,
#table.goal_id,
#table.outcome_id,
table.output_id,
table.weighting,
)
for r in rows:
indicator_id = r.id
if indicator_id not in indicators:
# We have no data for this indicator, so assume complete
current_status = overall_status = 100.0
else:
i = indicators[indicator_id]
total_target = i["total_target"]
total_value = i["total_value"]
current_target = i["current_target"]
current_value = i["current_value"]
if total_target == 0.0:
# Assume complete
overall_status = 100.0
elif total_value in (0.0, None):
overall_status = 0.0
else:
                    overall_status = float(total_value) / total_target * 100
if current_target == 0.0:
# Assume complete
current_status = 100.0
elif current_value in (0.0, None):
current_status = 0.0
else:
                    current_status = float(current_value) / current_target * 100
# Populate Outputs dict
output_id = r.output_id
weighting = r.weighting
if output_id not in outputs:
outputs[output_id] = {"current_status": current_status * weighting,
"overall_status": overall_status * weighting,
"total_weighting": weighting,
}
else:
o = outputs[output_id]
o.update(current_status = o["current_status"] + (current_status * weighting),
overall_status = o["overall_status"] + (overall_status * weighting),
total_weighting = o["total_weighting"] + weighting,
)
# Update Indicator Status
r.update_record(current_status = current_status,
overall_status = overall_status,
)
# Read all of the Outputs for this Project
table = s3db.project_output
query = (table.project_id == project_id) & \
(table.deleted == False)
rows = db(query).select(table.id,
#table.goal_id,
table.outcome_id,
table.weighting,
)
for r in rows:
output_id = r.id
if output_id not in outputs:
# We have no data for this output, so assume complete
current_status = overall_status = 100.0
else:
o = outputs[output_id]
total_weighting = o["total_weighting"]
current_status = o["current_status"] / total_weighting
overall_status = o["overall_status"] / total_weighting
# Populate Outcomes dict
outcome_id = r.outcome_id
weighting = r.weighting
if outcome_id not in outcomes:
outcomes[outcome_id] = {"current_status": current_status * weighting,
"overall_status": overall_status * weighting,
"total_weighting": weighting,
}
else:
o = outcomes[outcome_id]
o.update(current_status = o["current_status"] + (current_status * weighting),
overall_status = o["overall_status"] + (overall_status * weighting),
total_weighting = o["total_weighting"] + weighting,
)
# Update Output Status
r.update_record(current_status = current_status,
overall_status = overall_status,
)
# Read all of the Outcomes for this Project
table = s3db.project_outcome
query = (table.project_id == project_id) & \
(table.deleted == False)
rows = db(query).select(table.id,
table.goal_id,
table.weighting,
)
for r in rows:
outcome_id = r.id
if outcome_id not in outcomes:
# We have no data for this outcome, so assume complete
current_status = overall_status = 100.0
else:
o = outcomes[outcome_id]
total_weighting = o["total_weighting"]
current_status = o["current_status"] / total_weighting
overall_status = o["overall_status"] / total_weighting
# Populate Goals dict
goal_id = r.goal_id
weighting = r.weighting
if goal_id not in goals:
goals[goal_id] = {"current_status": current_status * weighting,
"overall_status": overall_status * weighting,
"total_weighting": weighting,
}
else:
g = goals[goal_id]
g.update(current_status = g["current_status"] + (current_status * weighting),
overall_status = g["overall_status"] + (overall_status * weighting),
total_weighting = g["total_weighting"] + weighting,
)
# Update Outcome Status
r.update_record(current_status = current_status,
overall_status = overall_status,
)
# Read all of the Goals for this Project
table = s3db.project_goal
query = (table.project_id == project_id) & \
(table.deleted == False)
rows = db(query).select(table.id,
table.weighting,
)
for r in rows:
goal_id = r.id
if goal_id not in goals:
# We have no data for this goal, so assume complete
current_status = overall_status = 100.0
else:
g = goals[goal_id]
total_weighting = g["total_weighting"]
current_status = g["current_status"] / total_weighting
overall_status = g["overall_status"] / total_weighting
# Populate Project dict
weighting = r.weighting
if project is None:
project = {"current_status": current_status * weighting,
"overall_status": overall_status * weighting,
"total_weighting": weighting,
}
else:
project.update(current_status = project["current_status"] + (current_status * weighting),
overall_status = project["overall_status"] + (overall_status * weighting),
total_weighting = project["total_weighting"] + weighting,
)
# Update Goal Status
r.update_record(current_status = current_status,
overall_status = overall_status,
)
# Update Project Status
total_weighting = project["total_weighting"]
current_status = project["current_status"] / total_weighting
overall_status = project["overall_status"] / total_weighting
table = s3db.project_project
db(table.id == project_id).update(current_status_by_indicators = current_status,
overall_status_by_indicators = overall_status,
)
# -------------------------------------------------------------------------
@staticmethod
def project_goal_deduplicate(item):
""" Import item de-duplication """
data = item.data
name = data.get("name")
if name:
table = item.table
query = (table.name == name)
project_id = data.get("project_id")
if project_id:
query &= ((table.project_id == project_id) | \
(table.project_id == None))
duplicate = current.db(query).select(table.id,
limitby=(0, 1)).first()
if duplicate:
item.id = duplicate.id
item.method = item.METHOD.UPDATE
# -------------------------------------------------------------------------
def project_goal_create_onaccept(self, form):
"""
Default all weightings to an even spread
"""
db = current.db
record_id = form.vars.id
# Find the project_id
table = current.s3db.project_goal
record = db(table.id == record_id).select(table.project_id,
limitby=(0, 1)
).first()
try:
project_id = record.project_id
except:
s3_debug("Cannot find Project Goal record (no record for this ID), so can't setup default weightings")
return
# Read the records
query = (table.project_id == project_id) & \
(table.deleted == False)
records = db(query).select(table.id)
weighting = 1.0 / len(records)
for r in records:
# Set the weighting
r.update_record(weighting = weighting)
# Fire normal onaccept
self.project_goal_onaccept(form, create=True)
# -------------------------------------------------------------------------
def project_goal_onaccept(self, form, create=False):
"""
Warn if total weightings are not equal to 1.0
Update Project Status
"""
db = current.db
form_vars = form.vars
record_id = form_vars.id
# Find the project_id
table = current.s3db.project_goal
record = db(table.id == record_id).select(table.project_id,
limitby=(0, 1)
).first()
try:
project_id = record.project_id
except:
s3_debug("Cannot find Project Goal record (no record for this ID), so can't update statuses or validate weighting")
return
if not create:
# Read the total Weightings
query = (table.project_id == project_id) & \
(table.deleted == False) & \
(table.id != record_id)
records = db(query).select(table.weighting)
total = 0
for r in records:
total += r.weighting
# Add what we're trying to add
total += form_vars.weighting
            # Check if we're on 1.0 (allowing for floating-point rounding)
            if abs(total - 1.0) > 0.001:
current.response.warning = current.T("Weightings should add up to 1.0")
# Update Statuses
self.project_planning_status_update(project_id)
# -------------------------------------------------------------------------
@staticmethod
def project_outcome_deduplicate(item):
""" Import item de-duplication """
data = item.data
name = data.get("name")
if name:
table = item.table
query = (table.name == name)
project_id = data.get("project_id")
if project_id:
query &= ((table.project_id == project_id) | \
(table.project_id == None))
duplicate = current.db(query).select(table.id,
limitby=(0, 1)).first()
if duplicate:
item.id = duplicate.id
item.method = item.METHOD.UPDATE
# -------------------------------------------------------------------------
def project_outcome_create_onaccept(self, form):
"""
Default all weightings to an even spread
@ToDo: Handle deployment_settings which have Outcomes directly
attached to Projects
"""
db = current.db
record_id = form.vars.id
# Find the goal_id
table = current.s3db.project_outcome
record = db(table.id == record_id).select(table.goal_id,
limitby=(0, 1)
).first()
try:
goal_id = record.goal_id
except:
s3_debug("Cannot find Project Outcome record (no record for this ID), so can't setup default weightings")
return
# Read the records
query = (table.goal_id == goal_id) & \
(table.deleted == False)
records = db(query).select(table.id)
weighting = 1.0 / len(records)
for r in records:
# Set the weighting
r.update_record(weighting = weighting)
# Fire normal onaccept
self.project_outcome_onaccept(form, create=True)
# -------------------------------------------------------------------------
def project_outcome_onaccept(self, form, create=False):
"""
Warn if totals are not equal to 1.0
@ToDo: Handle deployment_settings which have Outcomes directly
attached to Projects
"""
db = current.db
        record_id = form.vars.id
# Find the project_id
table = current.s3db.project_outcome
record = db(table.id == record_id).select(table.goal_id,
table.project_id,
limitby=(0, 1)
).first()
try:
project_id = record.project_id
except:
s3_debug("Cannot find Project Outcome record (no record for this ID), so can't update statuses or validate weighting")
return
if not create:
# Read the total Weightings
query = (table.goal_id == record.goal_id) & \
(table.deleted == False) & \
(table.id != record_id)
records = db(query).select(table.weighting)
total = 0
for r in records:
total += r.weighting
# Add what we're trying to add
total += form.vars.weighting
            # Check if we're on 1.0 (allowing for floating-point rounding)
            if abs(total - 1.0) > 0.001:
current.response.warning = current.T("Weightings should add up to 1.0")
# Update Statuses
self.project_planning_status_update(project_id)
# -------------------------------------------------------------------------
@staticmethod
def project_output_deduplicate(item):
""" Import item de-duplication """
data = item.data
name = data.get("name")
if name:
table = item.table
query = (table.name == name)
project_id = data.get("project_id")
if project_id:
query &= ((table.project_id == project_id) | \
(table.project_id == None))
duplicate = current.db(query).select(table.id,
limitby=(0, 1)).first()
if duplicate:
item.id = duplicate.id
item.method = item.METHOD.UPDATE
# -------------------------------------------------------------------------
def project_output_create_onaccept(self, form):
"""
Default all weightings to an even spread
@ToDo: Handle deployment_settings which have Outputs directly
attached to Projects
"""
db = current.db
record_id = form.vars.id
# Find the outcome_id
table = current.s3db.project_output
record = db(table.id == record_id).select(table.outcome_id,
limitby=(0, 1)
).first()
try:
outcome_id = record.outcome_id
except:
s3_debug("Cannot find Project Output record (no record for this ID), so can't setup default weightings")
return
# Read the records
query = (table.outcome_id == outcome_id) & \
(table.deleted == False)
records = db(query).select(table.id)
weighting = 1.0 / len(records)
for r in records:
# Set the weighting
r.update_record(weighting = weighting)
# Fire normal onaccept
self.project_output_onaccept(form, create=True)
# -------------------------------------------------------------------------
def project_output_onaccept(self, form, create=False):
"""
Update all ancestor fields from immediate parent
Warn if totals are not equal to 1.0
@ToDo: Handle deployment_settings which have Outputs directly
attached to Projects
Update Project Status at all levels
"""
db = current.db
s3db = current.s3db
form_vars = form.vars
record_id = form_vars.id
table = s3db.project_output
settings = current.deployment_settings
if settings.get_project_outcomes() and \
settings.get_project_goals():
outcome_id = form_vars.get("outcome_id")
if outcome_id:
# Populate the Goal from the Outcome
otable = s3db.project_outcome
outcome = db(otable.id == outcome_id).select(otable.goal_id,
limitby=(0, 1)
).first()
if outcome:
db(table.id == record_id).update(goal_id = outcome.goal_id)
if not create:
# Read the total Weightings
query = (table.outcome_id == outcome_id) & \
(table.deleted == False) & \
(table.id != record_id)
records = db(query).select(table.weighting)
total = 0
for r in records:
total += r.weighting
# Add what we're trying to add
total += form_vars.weighting
                    # Check if we're on 1.0 (allowing for floating-point rounding)
                    if abs(total - 1.0) > 0.001:
current.response.warning = current.T("Weightings should add up to 1.0")
# Update Statuses
row = db(table.id == record_id).select(table.project_id,
limitby=(0, 1)
).first()
try:
self.project_planning_status_update(row.project_id)
except:
s3_debug("Cannot find Project record (no record for this ID), so can't update statuses")
# -------------------------------------------------------------------------
@staticmethod
def project_indicator_deduplicate(item):
""" Import item de-duplication """
data = item.data
name = data.get("name")
if name:
table = item.table
query = (table.name == name)
project_id = data.get("project_id")
if project_id:
query &= ((table.project_id == project_id) | \
(table.project_id == None))
duplicate = current.db(query).select(table.id,
limitby=(0, 1)).first()
if duplicate:
item.id = duplicate.id
item.method = item.METHOD.UPDATE
# -------------------------------------------------------------------------
def project_indicator_create_onaccept(self, form):
"""
Default all weightings to an even spread
@ToDo: Handle deployment_settings which have Indicators directly
attached to Projects
"""
db = current.db
record_id = form.vars.id
# Find the output_id
table = current.s3db.project_indicator
record = db(table.id == record_id).select(table.output_id,
limitby=(0, 1)
).first()
try:
output_id = record.output_id
except:
s3_debug("Cannot find Project Indicator record (no record for this ID), so can't setup default weightings")
return
# Read the records
query = (table.output_id == output_id) & \
(table.deleted == False)
records = db(query).select(table.id)
weighting = 1.0 / len(records)
for r in records:
# Set the weighting
r.update_record(weighting = weighting)
# Fire normal onaccept
self.project_indicator_onaccept(form, create=True)
# -------------------------------------------------------------------------
def project_indicator_onaccept(self, form, create=False):
"""
Update all ancestor fields from immediate parent
Warn if totals are not equal to 1.0
@ToDo: Handle deployment_settings which have Indicators directly
attached to Projects
Update Project Status at all levels
"""
db = current.db
s3db = current.s3db
form_vars = form.vars
record_id = form_vars.id
table = s3db.project_indicator
settings = current.deployment_settings
if settings.get_project_outputs() and \
(settings.get_project_outcomes() or \
settings.get_project_goals()):
output_id = form_vars.get("output_id")
if output_id:
# Populate the Goal &/or Outcome from the Output
otable = s3db.project_output
output = db(otable.id == output_id).select(otable.goal_id,
otable.outcome_id,
limitby=(0, 1)
).first()
if output:
db(table.id == record_id).update(goal_id = output.goal_id,
outcome_id = output.outcome_id,
)
if not create:
# Read the total Weightings
query = (table.output_id == output_id) & \
(table.deleted == False) & \
(table.id != record_id)
records = db(query).select(table.weighting)
total = 0
for r in records:
total += r.weighting
# Add what we're trying to add
total += form_vars.weighting
                    # Check if we're on 1.0 (allowing for floating-point rounding)
                    if abs(total - 1.0) > 0.001:
current.response.warning = current.T("Weightings should add up to 1.0")
elif settings.get_project_outcomes() and \
settings.get_project_goals():
outcome_id = form_vars.get("outcome_id")
if outcome_id:
# Populate the Goal from the Outcome
otable = s3db.project_outcome
outcome = db(otable.id == outcome_id).select(otable.goal_id,
limitby=(0, 1)
).first()
if outcome:
db(table.id == record_id).update(goal_id = outcome.goal_id)
# Update Statuses
row = db(table.id == record_id).select(table.project_id,
limitby=(0, 1)
).first()
try:
self.project_planning_status_update(row.project_id)
except:
s3_debug("Cannot find Project record (no record for this ID), so can't update statuses")
# -------------------------------------------------------------------------
def project_indicator_data_onaccept(self, form):
"""
Handle Updates of entries to reset the hidden start_date
Update Project Status at all levels
"""
db = current.db
s3db = current.s3db
table = s3db.project_indicator_data
record_id = form.vars.id
# Read the Indicator Data record
record = db(table.id == record_id).select(table.indicator_id,
table.start_date,
table.end_date,
limitby=(0, 1)
).first()
try:
indicator_id = record.indicator_id
except:
s3_debug("Cannot find Project Indicator Data record (no record for this ID), so can't update start_date or statuses")
return
start_date = record.start_date
end_date = record.end_date
# Locate the immediately preceding record
query = (table.indicator_id == indicator_id) & \
(table.deleted == False) & \
(table.end_date < end_date)
date_field = table.end_date
record = db(query).select(date_field,
limitby=(0, 1),
orderby=date_field,
).first()
if record and record[date_field] != start_date:
# Update this record's start_date
db(table.id == record_id).update(start_date = record[date_field])
# Locate the immediately succeeding record
query = (table.indicator_id == indicator_id) & \
(table.deleted == False) & \
(table.end_date > end_date)
record = db(query).select(table.id,
table.start_date,
date_field, # Needed for orderby on Postgres
limitby=(0, 1),
orderby=date_field,
).first()
if record and record.start_date != end_date:
# Update that record's start_date
db(table.id == record.id).update(start_date = end_date)
# Update Statuses
table = s3db.project_indicator
row = db(table.id == indicator_id).select(table.project_id,
limitby=(0, 1)
).first()
try:
self.project_planning_status_update(row.project_id)
except:
s3_debug("Cannot find Project record (no record for this ID), so can't update statuses")
# -------------------------------------------------------------------------
@staticmethod
def project_indicator_percentage(row):
"""
Virtual Field to show the percentage completion of the Indicator
"""
if hasattr(row, "project_indicator_data"):
row = row.project_indicator_data
if hasattr(row, "target_value"):
planned = row.target_value
if planned == 0.0:
# Can't divide by Zero
return current.messages["NONE"]
else:
planned = None
if hasattr(row, "value"):
actual = row.value
else:
actual = None
if planned is not None and actual is not None:
            percentage = float(actual) / planned * 100
return project_status_represent(percentage)
if hasattr(row, "id"):
# Reload the record
table = current.s3db.project_indicator_data
r = current.db(table.id == row.id).select(table.target_value,
table.value,
limitby=(0, 1)
).first()
if r:
planned = r.target_value
value = r.value
if planned and value:
if planned == 0.0:
# Can't divide by Zero
return current.messages["NONE"]
                    percentage = float(value) / planned * 100
return project_status_represent(percentage)
else:
return project_status_represent(0.0)
return current.messages["NONE"]
# =============================================================================
def project_status_represent(value):
"""
Colour-coding of Statuses
@ToDo: Configurable thresholds
"""
if value >= 86:
colour = "00ff00" # Green
elif value >= 66:
colour = "ffff00" # Yellow
else:
colour = "ff0000" # Red
# Represent the number
represent = IS_FLOAT_AMOUNT.represent(value, precision=2)
return SPAN(represent,
# @ToDo: Use CSS
_style = "background:#%s;padding:5px" % colour,
)
# =============================================================================
class S3ProjectProgrammeModel(S3Model):
"""
Project Programme Model
"""
names = ("project_programme",
"project_programme_id",
"project_programme_project",
)
def model(self):
T = current.T
db = current.db
crud_strings = current.response.s3.crud_strings
define_table = self.define_table
NONE = current.messages["NONE"]
# ---------------------------------------------------------------------
# Project Programmes
#
tablename = "project_programme"
define_table(tablename,
self.org_organisation_id(),
Field("name",
label = T("Title"),
represent = lambda v: T(v) if v is not None \
else NONE,
requires = IS_NOT_EMPTY()
),
s3_comments(),
*s3_meta_fields())
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Create Program"),
title_display = T("Program"),
title_list = T("Programs"),
title_update = T("Edit Program"),
title_upload = T("Import Programs"),
label_list_button = T("List Programs"),
msg_record_created = T("Program created"),
msg_record_modified = T("Program updated"),
msg_record_deleted = T("Program deleted"),
msg_list_empty = T("No Programs found")
)
represent = S3Represent(lookup=tablename, translate=True)
programme_id = S3ReusableField("programme_id", "reference %s" % tablename,
label = T("Program"),
ondelete = "CASCADE",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "project_programme.id",
represent,
updateable = True,
)),
sortby = "name",
comment = S3AddResourceLink(c="project",
f="programme",
),
)
self.configure(tablename,
deduplicate = self.programme_duplicate,
)
self.add_components(tablename,
project_project = {"link": "project_programme_project",
"joinby": "programme_id",
"key": "project_id",
"actuate": "link",
"autocomplete": "name",
"autodelete": False,
})
# ---------------------------------------------------------------------
# Project Programmes <=> Projects
#
tablename = "project_programme_project"
define_table(tablename,
programme_id(),
self.project_project_id(),
*s3_meta_fields())
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return {"project_programme_id": programme_id,
}
# -------------------------------------------------------------------------
@staticmethod
def defaults():
""" Safe defaults for names if module is disabled """
dummy = S3ReusableField("dummy_id", "integer",
readable = False,
writable = False)
return {"project_programme_id": lambda **attr: dummy("programme_id"),
}
# -------------------------------------------------------------------------
@staticmethod
def programme_duplicate(item):
""" Import item update-detection """
data = item.data
name = data.get("name")
if name:
table = item.table
query = (table.name.lower() == name.lower())
org = data.get("organisation_id")
if org:
query &= (table.organisation_id == org)
duplicate = current.db(query).select(table.id,
limitby=(0, 1)).first()
if duplicate:
item.id = duplicate.id
item.method = item.METHOD.UPDATE
return
# =============================================================================
class S3ProjectSectorModel(S3Model):
"""
Project Sector Model
"""
names = ("project_sector_project",)
def model(self):
T = current.T
# ---------------------------------------------------------------------
# Projects <> Sectors Link Table
#
tablename = "project_sector_project"
self.define_table(tablename,
self.org_sector_id(empty = False,
ondelete = "CASCADE",
),
self.project_project_id(empty = False,
ondelete = "CASCADE",
),
*s3_meta_fields()
)
# CRUD Strings
current.response.s3.crud_strings[tablename] = Storage(
label_create = T("Add Sector"),
title_display = T("Sector"),
title_list = T("Sectors"),
title_update = T("Edit Sector"),
title_upload = T("Import Sector data"),
label_list_button = T("List Sectors"),
msg_record_created = T("Sector added to Project"),
msg_record_modified = T("Sector updated"),
msg_record_deleted = T("Sector removed from Project"),
msg_list_empty = T("No Sectors found for this Project")
)
# Pass names back to global scope (s3.*)
return {}
# =============================================================================
class S3ProjectStatusModel(S3Model):
"""
Project Status Model
- used by both Projects & Activities
"""
names = ("project_status",
"project_status_id",
)
def model(self):
T = current.T
# ---------------------------------------------------------------------
# Project Statuses
#
tablename = "project_status"
self.define_table(tablename,
Field("name", length=128, notnull=True, unique=True,
label = T("Name"),
),
s3_comments(),
*s3_meta_fields())
# CRUD Strings
ADD_STATUS = T("Create Status")
current.response.s3.crud_strings[tablename] = Storage(
label_create = ADD_STATUS,
title_display = T("Status Details"),
title_list = T("Statuses"),
title_update = T("Edit Status"),
#title_upload = T("Import Statuses"),
label_list_button = T("List Statuses"),
label_delete_button = T("Delete Status"),
msg_record_created = T("Status added"),
msg_record_modified = T("Status updated"),
msg_record_deleted = T("Status deleted"),
msg_list_empty = T("No Statuses currently registered"))
# Reusable Field
represent = S3Represent(lookup=tablename, translate=True)
#none = T("Unknown"))
status_id = S3ReusableField("status_id", "reference %s" % tablename,
comment = S3AddResourceLink(title=ADD_STATUS,
c="project",
f="status"),
label = T("Status"),
ondelete = "SET NULL",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(current.db, "project_status.id",
represent,
sort=True)),
sortby = "name",
)
# Pass names back to global scope (s3.*)
return dict(project_status_id = status_id,
)
# =============================================================================
class S3ProjectThemeModel(S3Model):
"""
Project Theme Model
"""
names = ("project_theme",
"project_theme_id",
"project_theme_sector",
"project_theme_project",
"project_theme_activity",
"project_theme_location",
)
def model(self):
T = current.T
db = current.db
add_components = self.add_components
configure = self.configure
crud_strings = current.response.s3.crud_strings
define_table = self.define_table
theme_percentages = current.deployment_settings.get_project_theme_percentages()
NONE = current.messages["NONE"]
# ---------------------------------------------------------------------
# Themes
#
tablename = "project_theme"
define_table(tablename,
Field("name", length=128, notnull=True, unique=True,
label = T("Name"),
represent = lambda v: T(v) if v is not None \
else NONE,
),
s3_comments(
represent = lambda v: T(v) if v is not None \
else NONE,
),
*s3_meta_fields())
# CRUD Strings
ADD_THEME = T("Create Theme")
crud_strings[tablename] = Storage(
label_create = ADD_THEME,
title_display = T("Theme Details"),
title_list = T("Themes"),
title_update = T("Edit Theme"),
#title_upload = T("Import Themes"),
label_list_button = T("List Themes"),
label_delete_button = T("Delete Theme"),
msg_record_created = T("Theme added"),
msg_record_modified = T("Theme updated"),
msg_record_deleted = T("Theme deleted"),
msg_list_empty = T("No Themes currently registered"))
# Reusable Field
represent = S3Represent(lookup=tablename, translate=True)
theme_id = S3ReusableField("theme_id", "reference %s" % tablename,
label = T("Theme"),
ondelete = "CASCADE",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "project_theme.id",
represent,
sort=True)),
sortby = "name",
)
# Components
add_components(tablename,
# Projects
project_theme_project = "theme_id",
# Sectors
project_theme_sector = "theme_id",
# For Sync Filter
org_sector = {"link": "project_theme_sector",
"joinby": "theme_id",
"key": "sector_id",
},
)
crud_form = S3SQLCustomForm(
"name",
# Project Sectors
S3SQLInlineComponent(
"theme_sector",
label = T("Sectors to which this Theme can apply"),
fields = ["sector_id"],
),
"comments"
)
configure(tablename,
crud_form = crud_form,
list_fields = ["id",
"name",
(T("Sectors"), "theme_sector.sector_id"),
"comments",
],
)
# ---------------------------------------------------------------------
# Theme <> Sector Link Table
#
tablename = "project_theme_sector"
define_table(tablename,
theme_id(empty = False,
ondelete = "CASCADE",
),
self.org_sector_id(label = "",
empty = False,
ondelete = "CASCADE",
),
*s3_meta_fields())
crud_strings[tablename] = Storage(
label_create = T("Add Sector"),
title_display = T("Sector"),
title_list = T("Sectors"),
title_update = T("Edit Sector"),
title_upload = T("Import Sector data"),
label_list_button = T("List Sectors"),
msg_record_created = T("Sector added to Theme"),
msg_record_modified = T("Sector updated"),
msg_record_deleted = T("Sector removed from Theme"),
msg_list_empty = T("No Sectors found for this Theme")
)
# ---------------------------------------------------------------------
# Theme <> Project Link Table
#
tablename = "project_theme_project"
define_table(tablename,
theme_id(empty = False,
ondelete = "CASCADE",
),
self.project_project_id(empty = False,
ondelete = "CASCADE",
),
# % breakdown by theme (sector in IATI)
Field("percentage", "integer",
default = 0,
label = T("Percentage"),
requires = IS_INT_IN_RANGE(0, 101),
readable = theme_percentages,
writable = theme_percentages,
),
*s3_meta_fields())
crud_strings[tablename] = Storage(
label_create = T("Add Theme"),
title_display = T("Theme"),
title_list = T("Themes"),
title_update = T("Edit Theme"),
#title_upload = T("Import Theme data"),
label_list_button = T("List Themes"),
msg_record_created = T("Theme added to Project"),
msg_record_modified = T("Theme updated"),
msg_record_deleted = T("Theme removed from Project"),
msg_list_empty = T("No Themes found for this Project")
)
configure(tablename,
deduplicate = self.project_theme_project_deduplicate,
onaccept = self.project_theme_project_onaccept,
)
# ---------------------------------------------------------------------
# Theme <> Activity Link Table
#
tablename = "project_theme_activity"
define_table(tablename,
theme_id(empty = False,
ondelete = "CASCADE",
),
self.project_activity_id(empty = False,
ondelete = "CASCADE",
),
# % breakdown by theme (sector in IATI)
#Field("percentage", "integer",
# label = T("Percentage"),
# default = 0,
# requires = IS_INT_IN_RANGE(0, 101),
# readable = theme_percentages,
# writable = theme_percentages,
# ),
*s3_meta_fields())
crud_strings[tablename] = Storage(
label_create = T("New Theme"),
title_display = T("Theme"),
title_list = T("Themes"),
title_update = T("Edit Theme"),
#title_upload = T("Import Theme data"),
label_list_button = T("List Themes"),
msg_record_created = T("Theme added to Activity"),
msg_record_modified = T("Theme updated"),
msg_record_deleted = T("Theme removed from Activity"),
msg_list_empty = T("No Themes found for this Activity")
)
configure(tablename,
deduplicate = self.project_theme_activity_deduplicate,
#onaccept = self.project_theme_activity_onaccept,
)
# ---------------------------------------------------------------------
# Theme <> Project Location Link Table
#
tablename = "project_theme_location"
define_table(tablename,
theme_id(empty = False,
ondelete = "CASCADE",
),
self.project_location_id(empty = False,
ondelete = "CASCADE",
),
# % breakdown by theme (sector in IATI)
Field("percentage", "integer",
default = 0,
label = T("Percentage"),
requires = IS_INT_IN_RANGE(0, 101),
readable = theme_percentages,
writable = theme_percentages,
),
*s3_meta_fields())
crud_strings[tablename] = Storage(
label_create = T("New Theme"),
title_display = T("Theme"),
title_list = T("Themes"),
title_update = T("Edit Theme"),
title_upload = T("Import Theme data"),
label_list_button = T("List Themes"),
msg_record_created = T("Theme added to Project Location"),
msg_record_modified = T("Theme updated"),
msg_record_deleted = T("Theme removed from Project Location"),
msg_list_empty = T("No Themes found for this Project Location")
)
# Pass names back to global scope (s3.*)
return {}
# -------------------------------------------------------------------------
@staticmethod
def project_theme_project_onaccept(form):
"""
Record creation post-processing
Update the percentages of all the Project's Locations.
"""
# Check for prepop
project_id = form.vars.get("project_id", None)
if not project_id and form.request_vars:
# Interactive form
project_id = form.request_vars.get("project_id", None)
if not project_id:
return
# Calculate the list of Percentages for this Project
percentages = {}
db = current.db
table = db.project_theme_project
query = (table.deleted == False) & \
(table.project_id == project_id)
rows = db(query).select(table.theme_id,
table.percentage)
for row in rows:
percentages[row.theme_id] = row.percentage
# Update the Project's Locations
s3db = current.s3db
table = s3db.project_location
ltable = s3db.project_theme_location
update_or_insert = ltable.update_or_insert
query = (table.deleted == False) & \
(table.project_id == project_id)
rows = db(query).select(table.id)
for row in rows:
for theme_id in percentages:
update_or_insert(project_location_id = row.id,
theme_id = theme_id,
percentage = percentages[theme_id])
# -------------------------------------------------------------------------
@staticmethod
def project_theme_project_deduplicate(item):
""" Import item de-duplication """
data = item.data
project_id = data.get("project_id")
theme_id = data.get("theme_id")
if project_id and theme_id:
table = item.table
query = (table.project_id == project_id) & \
(table.theme_id == theme_id)
duplicate = current.db(query).select(table.id,
limitby=(0, 1)).first()
if duplicate:
item.id = duplicate.id
item.method = item.METHOD.UPDATE
# -------------------------------------------------------------------------
@staticmethod
def project_theme_activity_deduplicate(item):
""" Import item de-duplication """
data = item.data
activity_id = data.get("activity_id")
theme_id = data.get("theme_id")
if activity_id and theme_id:
table = item.table
query = (table.activity_id == activity_id) & \
(table.theme_id == theme_id)
duplicate = current.db(query).select(table.id,
limitby=(0, 1)).first()
if duplicate:
item.id = duplicate.id
item.method = item.METHOD.UPDATE
# =============================================================================
class S3ProjectDRRModel(S3Model):
"""
Models for DRR (Disaster Risk Reduction) extensions
"""
names = ("project_drr",)
def model(self):
T = current.T
hfa_opts = project_hfa_opts()
options = dict((opt, "HFA %s" % opt) for opt in hfa_opts)
tablename = "project_drr"
self.define_table(tablename,
self.project_project_id(empty=False),
Field("hfa", "list:integer",
label = T("HFA Priorities"),
represent = S3Represent(options=options,
multiple=True),
requires = IS_EMPTY_OR(IS_IN_SET(
options,
multiple = True)),
widget = S3GroupedOptionsWidget(
cols=1,
help_field=hfa_opts
),
),
*s3_meta_fields())
# Pass names back to global scope (s3.*)
return {}
# -------------------------------------------------------------------------
@staticmethod
def hfa_opts_represent(opt):
""" Option representation """
if not opt:
return current.messages["NONE"]
if isinstance(opt, int):
opts = [opt]
elif not isinstance(opt, (list, tuple)):
return current.messages["NONE"]
else:
opts = opt
if opts[0] is None:
return current.messages["NONE"]
vals = ["HFA %s" % o for o in opts]
return ", ".join(vals)
# =============================================================================
class S3ProjectDRRPPModel(S3Model):
"""
Models for DRR Project Portal extensions
- injected into custom Project CRUD forms
"""
names = ("project_drrpp",)
def model(self):
T = current.T
db = current.db
NONE = current.messages["NONE"]
local_currencies = current.deployment_settings.get_fin_currencies().keys()
try:
local_currencies.remove("USD")
except ValueError:
            # USD not in the configured local currencies
pass
project_rfa_opts = self.project_rfa_opts()
project_pifacc_opts = self.project_pifacc_opts()
project_jnap_opts = self.project_jnap_opts()
tablename = "project_drrpp"
self.define_table(tablename,
self.project_project_id(
# Override requires so that update access to the projects isn't required
requires = IS_ONE_OF(db, "project_project.id",
self.project_project_represent
)
),
Field("parent_project",
label = T("Name of a programme or another project which this project is implemented as part of"),
represent = lambda v: v or NONE,
#comment = DIV(_class="tooltip",
# _title="%s|%s" % (T("Parent Project"),
# T("The parent project or programme which this project is implemented under"))),
),
Field("duration", "integer",
label = T("Duration (months)"),
represent = lambda v: v or NONE,
),
Field("local_budget", "double",
label = T("Total Funding (Local Currency)"),
represent = lambda v: \
IS_FLOAT_AMOUNT.represent(v, precision=2),
),
s3_currency("local_currency",
label = T("Local Currency"),
requires = IS_IN_SET(local_currencies,
zero=None)
),
Field("activities", "text",
label = T("Activities"),
represent = lambda v: v or NONE,
),
Field("rfa", "list:integer",
label = T("RFA Priorities"),
represent = lambda opt: \
self.opts_represent(opt, "RFA"),
requires = IS_EMPTY_OR(
IS_IN_SET(project_rfa_opts.keys(),
labels = ["RFA %s" % \
rfa for rfa in project_rfa_opts.keys()],
multiple = True)),
widget = S3GroupedOptionsWidget(help_field = project_rfa_opts,
cols = 1,
),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("RFA Priorities"),
T("Applicable to projects in Pacific countries only"))),
),
Field("pifacc", "list:integer",
label = T("PIFACC Priorities"),
represent = lambda opt: \
self.opts_represent(opt, "PIFACC"),
requires = IS_EMPTY_OR(
IS_IN_SET(project_pifacc_opts.keys(),
labels = ["PIFACC %s" % \
pifacc for pifacc in project_pifacc_opts.keys()],
multiple = True)),
widget = S3GroupedOptionsWidget(help_field = project_pifacc_opts,
cols = 1,
),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("PIFACC Priorities"),
T("Pacific Islands Framework for Action on Climate Change. Applicable to projects in Pacific countries only"))),
),
Field("jnap", "list:integer",
label = T("JNAP Priorities"),
represent = lambda opt: \
self.opts_represent(opt, "JNAP"),
requires = IS_EMPTY_OR(
IS_IN_SET(project_jnap_opts.keys(),
labels = ["JNAP %s" % \
jnap for jnap in project_jnap_opts.keys()],
multiple = True)),
widget = S3GroupedOptionsWidget(help_field = project_jnap_opts,
cols = 1,
),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("JNAP Priorities"),
T("Joint National Action Plan for Disaster Risk Management and Climate Change Adaptation. Applicable to Cook Islands only"))),
),
Field("L1", "list:integer",
label = T("Cook Islands"),
represent = S3Represent(lookup="gis_location",
multiple=True),
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "gis_location.id",
S3Represent(lookup="gis_location"),
filterby = "L0",
filter_opts = ("Cook Islands",),
not_filterby = "name",
not_filter_opts = ("Cook Islands",),
multiple=True)),
widget = S3GroupedOptionsWidget(size = None, # do not group by letter
cols = 4,
),
),
Field("outputs", "text",
label = "%s (Old - do NOT use)" % T("Outputs"),
represent = lambda v: v or NONE,
readable = False,
writable = False,
),
Field("focal_person",
label = T("Focal Person"),
represent = lambda v: v or NONE,
requires = IS_NOT_EMPTY(),
),
self.org_organisation_id(label = T("Organization")),
Field("email",
label = T("Email"),
represent = lambda v: v or NONE,
requires = IS_EMPTY_OR(IS_EMAIL()),
),
*s3_meta_fields())
# CRUD Strings
current.response.s3.crud_strings[tablename] = Storage(
title_display = T("DRRPP Extensions"),
title_update = T("Edit DRRPP Extensions"),
)
self.configure(tablename,
onaccept = self.project_drrpp_onaccept,
)
# Pass names back to global scope (s3.*)
return {}
# -------------------------------------------------------------------------
@staticmethod
def project_drrpp_onaccept(form):
"""
After DB I/O tasks for Project DRRPP records
"""
db = current.db
vars = form.vars
id = vars.id
project_id = vars.project_id
dtable = db.project_drrpp
if not project_id:
# Most reliable way to get the project_id is to read the record
project_id = db(dtable.id == id).select(dtable.project_id,
limitby=(0, 1)
).first().project_id
table = db.project_project
hr_id = db(table.id == project_id).select(table.human_resource_id,
limitby=(0, 1)
).first().human_resource_id
if hr_id:
s3db = current.s3db
htable = db.hrm_human_resource
ctable = s3db.pr_contact
ptable = db.pr_person
query = (htable.id == hr_id) & \
(ptable.id == htable.person_id)
left = ctable.on((ctable.pe_id == ptable.pe_id) & \
(ctable.contact_method == "EMAIL"))
row = db(query).select(htable.organisation_id,
ptable.first_name,
ptable.middle_name,
ptable.last_name,
ctable.value,
left=left,
limitby=(0, 1)).first()
focal_person = s3_fullname(row[ptable])
organisation_id = row[htable].organisation_id
email = row[ctable].value
db(dtable.id == id).update(focal_person = focal_person,
organisation_id = organisation_id,
email = email,
)
# -------------------------------------------------------------------------
@staticmethod
def opts_represent(opt, prefix):
""" Option representation """
        if isinstance(opt, int):
            # Normalise a single value so that the list branch below handles it
            opt = [opt]
        if isinstance(opt, (list, tuple)):
if not opt or opt[0] is None:
return current.messages["NONE"]
else:
return ", ".join(["%s %s" % (prefix, o) for o in opt])
else:
return current.messages["NONE"]
# =============================================================================
class S3ProjectTaskModel(S3Model):
"""
Project Task Model
This class holds the tables used for an Organisation to manage
their Tasks in detail.
"""
names = ("project_milestone",
"project_tag",
"project_task",
"project_task_id",
"project_role",
"project_member",
"project_time",
"project_comment",
"project_task_project",
"project_task_activity",
"project_task_milestone",
"project_task_tag",
"project_task_represent_w_project",
"project_task_active_statuses",
"project_task_project_opts",
)
def model(self):
db = current.db
T = current.T
auth = current.auth
request = current.request
s3 = current.response.s3
settings = current.deployment_settings
project_id = self.project_project_id
messages = current.messages
UNKNOWN_OPT = messages.UNKNOWN_OPT
add_components = self.add_components
configure = self.configure
crud_strings = s3.crud_strings
define_table = self.define_table
set_method = self.set_method
super_link = self.super_link
# ---------------------------------------------------------------------
# Project Milestone
#
tablename = "project_milestone"
define_table(tablename,
# Stage Report
super_link("doc_id", "doc_entity"),
project_id(),
Field("name",
label = T("Short Description"),
requires = IS_NOT_EMPTY()
),
s3_date(),
s3_comments(),
*s3_meta_fields())
# CRUD Strings
ADD_MILESTONE = T("Create Milestone")
crud_strings[tablename] = Storage(
label_create = ADD_MILESTONE,
title_display = T("Milestone Details"),
title_list = T("Milestones"),
title_update = T("Edit Milestone"),
#title_upload = T("Import Milestones"),
label_list_button = T("List Milestones"),
msg_record_created = T("Milestone Added"),
msg_record_modified = T("Milestone Updated"),
msg_record_deleted = T("Milestone Deleted"),
msg_list_empty = T("No Milestones Found")
)
# Reusable Field
represent = S3Represent(lookup=tablename,
fields=["name", "date"],
labels="%(name)s: %(date)s",
)
milestone_id = S3ReusableField("milestone_id", "reference %s" % tablename,
label = T("Milestone"),
ondelete = "RESTRICT",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "project_milestone.id",
represent)),
sortby = "name",
comment = S3AddResourceLink(c="project",
f="milestone",
title=ADD_MILESTONE,
tooltip=T("A project milestone marks a significant date in the calendar which shows that progress towards the overall objective is being made.")),
)
configure(tablename,
deduplicate = self.project_milestone_duplicate,
orderby = "project_milestone.date",
)
# ---------------------------------------------------------------------
# Project Tags
#
tablename = "project_tag"
define_table(tablename,
Field("name",
label = T("Tag"),
),
*s3_meta_fields())
# CRUD Strings
ADD_TAG = T("Create Tag")
crud_strings[tablename] = Storage(
label_create = ADD_TAG,
title_display = T("Tag Details"),
title_list = T("Tags"),
title_update = T("Edit Tag"),
title_upload = T("Import Tags"),
label_list_button = T("List Tags"),
msg_record_created = T("Tag added"),
msg_record_modified = T("Tag updated"),
msg_record_deleted = T("Tag deleted"),
msg_list_empty = T("No tags currently defined"))
# Reusable Field
represent = S3Represent(lookup=tablename)
tag_id = S3ReusableField("tag_id", "reference %s" % tablename,
label = T("Tag"),
ondelete = "RESTRICT",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "project_tag.id",
represent)),
sortby = "name",
comment = S3AddResourceLink(c="project",
f="tag",
title=ADD_TAG,
tooltip=T("A project tag helps to assosiate keywords with projects/tasks.")),
)
# ---------------------------------------------------------------------
# Tasks
#
# Tasks can be linked to Activities or directly to Projects
# - they can also be used by the Event/Scenario modules
#
# @ToDo: Task templates
# @ToDo: Recurring tasks
#
project_task_priority_opts = settings.get_project_task_priority_opts()
project_task_status_opts = settings.get_project_task_status_opts()
# Which options for the Status for a Task count as the task being 'Active'
project_task_active_statuses = [2, 3, 4, 11]
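        # In the default status mapping, 2 is 'New' (the field default) and 3 is
        # 'Assigned' (see project_task_onvalidation below); the full mapping comes
        # from settings.get_project_task_status_opts()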
assignee_represent = self.pr_PersonEntityRepresent(show_label = False,
show_type = False)
#staff = auth.s3_has_role("STAFF")
staff = auth.is_logged_in()
tablename = "project_task"
define_table(tablename,
super_link("doc_id", "doc_entity"),
Field("template", "boolean",
default = False,
readable = False,
writable = False,
),
Field("name", length=100, notnull=True,
label = T("Short Description"),
requires = IS_LENGTH(maxsize=100, minsize=1),
),
Field("description", "text",
label = T("Detailed Description/URL"),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Detailed Description/URL"),
T("Please provide as much detail as you can, including the URL(s) where the bug occurs or you'd like the new feature to go."))),
),
self.org_site_id,
self.gis_location_id(
# Can be enabled & labelled within a Template as-required
#label = T("Deployment Location"),
readable = False,
writable = False
),
Field("source",
label = T("Source"),
),
Field("source_url",
label = T("Source Link"),
represent = s3_url_represent,
requires = IS_EMPTY_OR(IS_URL()),
),
Field("priority", "integer",
default = 3,
label = T("Priority"),
represent = lambda opt: \
project_task_priority_opts.get(opt,
UNKNOWN_OPT),
requires = IS_IN_SET(project_task_priority_opts,
zero=None),
),
# Could be a Person, Team or Organisation
super_link("pe_id", "pr_pentity",
readable = staff,
writable = staff,
label = T("Assigned to"),
filterby = "instance_type",
filter_opts = ("pr_person", "pr_group", "org_organisation"),
represent = assignee_represent,
# @ToDo: Widget
#widget = S3PentityWidget(),
#comment = DIV(_class="tooltip",
# _title="%s|%s" % (T("Assigned to"),
# messages.AUTOCOMPLETE_HELP))
),
s3_datetime("date_due",
label = T("Date Due"),
represent = "date",
readable = staff,
writable = staff,
),
Field("time_estimated", "double",
label = "%s (%s)" % (T("Time Estimate"),
T("hours")),
represent = lambda v: v or "",
readable = staff,
writable = staff,
),
Field("time_actual", "double",
label = "%s (%s)" % (T("Time Taken"),
T("hours")),
readable = staff,
# This comes from the Time component
writable = False,
),
Field("status", "integer",
default = 2,
label = T("Status"),
represent = lambda opt: \
project_task_status_opts.get(opt,
UNKNOWN_OPT),
requires = IS_IN_SET(project_task_status_opts,
zero=None),
readable = staff,
writable = staff,
),
Field.Method("task_id", self.project_task_task_id),
*s3_meta_fields())
# Field configurations
# Comment these if you don't need a Site associated with Tasks
#table.site_id.readable = table.site_id.writable = True
#table.site_id.label = T("Check-in at Facility") # T("Managing Office")
# @todo: make lazy_table
table = db[tablename]
table.created_on.represent = lambda dt: \
S3DateTime.date_represent(dt, utc=True)
# CRUD Strings
ADD_TASK = T("Create Task")
crud_strings[tablename] = Storage(
label_create = ADD_TASK,
title_display = T("Task Details"),
title_list = T("All Tasks"),
title_update = T("Edit Task"),
title_upload = T("Import Tasks"),
label_list_button = T("List Tasks"),
msg_record_created = T("Task added"),
msg_record_modified = T("Task updated"),
msg_record_deleted = T("Task deleted"),
msg_list_empty = T("No tasks currently registered"))
list_fields = ["id",
(T("ID"), "task_id"),
"priority",
]
lappend = list_fields.append
filter_widgets = [S3TextFilter(["name",
"description",
],
label = T("Search"),
_class = "filter-search",
),
S3OptionsFilter("priority",
options = project_task_priority_opts,
cols = 4,
),
]
fappend = filter_widgets.append
crud_fields = []
cappend = crud_fields.append
jquery_ready_append = s3.jquery_ready.append
use_projects = settings.get_project_projects()
if use_projects and current.request.function != "project":
lappend("task_project.project_id")
fappend(S3OptionsFilter("task_project.project_id",
options = self.project_task_project_opts,
))
cappend(S3SQLInlineComponent("task_project",
label = T("Project"),
fields = [("", "project_id")],
multiple = False,
))
if settings.get_project_activities():
lappend("task_activity.activity_id")
fappend(S3OptionsFilter("task_activity.activity_id",
options = self.project_task_activity_opts,
))
cappend(S3SQLInlineComponent("task_activity",
label = T("Activity"),
fields = [("", "activity_id")],
multiple = False,
))
if use_projects:
# Filter Activity List to just those for the Project
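                # filterOptionsS3: when the trigger field (task_project.project_id)
                # changes, the target field's (task_activity.activity_id) options are
                # looked up again from the project/activity resource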
options = {"trigger": {"alias": "task_project",
"name": "project_id",
},
"target": {"alias": "task_activity",
"name": "activity_id",
},
"scope": "form",
"lookupPrefix": "project",
"lookupResource": "activity",
"optional": True,
}
jquery_ready_append('''$.filterOptionsS3(%s)''' % \
json.dumps(options, separators=SEPARATORS))
if settings.get_project_task_tag():
lappend("task_tag.tag_id")
fappend(S3OptionsFilter("task_tag.tag_id",
))
cappend(S3SQLInlineComponent("task_tag",
label = T("Tags"),
fields = [("", "tag_id")],
))
crud_fields.extend(("name",
"description",
"source",
"priority",
"pe_id",
"date_due",
))
if settings.get_project_milestones():
# Use the field in this format to get the custom represent
lappend("task_milestone.milestone_id")
fappend(S3OptionsFilter("task_milestone.milestone_id",
options = self.project_task_milestone_opts,
))
cappend(S3SQLInlineComponent("task_milestone",
label = T("Milestone"),
fields = [("", "milestone_id")],
multiple = False,
))
if use_projects:
# Filter Milestone List to just those for the Project
options = {"trigger": {"alias": "task_project",
"name": "project_id",
},
"target": {"alias": "task_milestone",
"name": "milestone_id",
},
"scope": "form",
"lookupPrefix": "project",
"lookupResource": "milestone",
"optional": True,
}
jquery_ready_append('''$.filterOptionsS3(%s)''' % \
json.dumps(options, separators=SEPARATORS))
list_fields.extend(("name",
"pe_id",
"date_due",
"time_estimated",
"time_actual",
"created_on",
"status",
#"site_id"
))
filter_widgets.extend((S3OptionsFilter("pe_id",
label = T("Assigned To"),
none = T("Unassigned"),
),
S3OptionsFilter("status",
options = project_task_status_opts,
),
S3OptionsFilter("created_by",
label = T("Created By"),
hidden = True,
),
S3DateFilter("created_on",
label = T("Date Created"),
hide_time = True,
hidden = True,
),
S3DateFilter("date_due",
hide_time = True,
hidden = True,
),
S3DateFilter("modified_on",
label = T("Date Modified"),
hide_time = True,
hidden = True,
),
))
crud_fields.extend(("time_estimated",
"status",
S3SQLInlineComponent("time",
label = T("Time Log"),
fields = ["date",
"person_id",
"hours",
"comments"
],
orderby = "date"
),
"time_actual",
))
# Custom Form
crud_form = S3SQLCustomForm(*crud_fields)
report_options = Storage(rows = list_fields,
cols = list_fields,
fact = list_fields,
defaults = Storage(rows = "task.project",
cols = "task.pe_id",
fact = "sum(task.time_estimated)",
totals = True
),
)
# Resource Configuration
configure(tablename,
context = {#"event": "event.event_id",
"incident": "incident.incident_id",
"location": "location_id",
# Assignee instead?
"organisation": "created_by$organisation_id",
},
copyable = True,
#create_next = URL(f="task", args=["[id]"]),
create_onaccept = self.project_task_create_onaccept,
crud_form = crud_form,
extra = "description",
extra_fields = ["id"],
filter_widgets = filter_widgets,
list_fields = list_fields,
list_layout = project_task_list_layout,
onvalidation = self.project_task_onvalidation,
orderby = "project_task.priority,project_task.date_due asc",
realm_entity = self.project_task_realm_entity,
report_options = report_options,
super_entity = "doc_entity",
update_onaccept = self.project_task_update_onaccept,
)
# Reusable field
represent = project_TaskRepresent(show_link=True)
task_id = S3ReusableField("task_id", "reference %s" % tablename,
label = T("Task"),
ondelete = "CASCADE",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "project_task.id",
represent)),
sortby = "name",
comment = S3AddResourceLink(c="project",
f="task",
title=ADD_TASK,
tooltip=T("A task is a piece of work that an individual or team can do in 1-2 days.")),
)
# Representation with project name, for time log form
project_task_represent_w_project = project_TaskRepresent(show_project=True)
# Custom Methods
set_method("project", "task",
method = "dispatch",
action = self.project_task_dispatch)
# Components
add_components(tablename,
# Projects (for imports)
project_project = {"link": "project_task_project",
"joinby": "task_id",
"key": "project_id",
"actuate": "embed",
"autocomplete": "name",
"autodelete": False,
},
# Format for S3SQLInlineComponent
project_task_project = "task_id",
#project_activity_group = "activity_id",
# Activities
project_activity = {"link": "project_task_activity",
"joinby": "task_id",
"key": "activity_id",
"actuate": "embed",
"autocomplete": "name",
"autodelete": False,
},
# Format for S3SQLInlineComponent
project_task_activity = "task_id",
# Incidents
#event_incident = {"link": "event_task",
# "joinby": "task_id",
# "key": "incident_id",
# "actuate": "embed",
# "autocomplete": "name",
# "autodelete": False,
# },
# Format for InlineComponent
event_task = {"name": "incident",
"joinby": "task_id",
},
# Milestones
project_milestone = {"link": "project_task_milestone",
"joinby": "task_id",
"key": "milestone_id",
"actuate": "embed",
"autocomplete": "name",
"autodelete": False,
},
# Format for S3SQLInlineComponent
project_task_milestone = "task_id",
# Members
project_member = "task_id",
# Tags
project_tag = {"link": "project_task_tag",
"joinby": "task_id",
"key": "tag_id",
"actuate": "embed",
"autocomplete": "name",
"autodelete": False,
},
# Format for S3SQLInlineComponent
project_task_tag = "task_id",
# Job titles
hrm_job_title = {"link": "project_task_job_title",
"joinby": "task_id",
"key": "job_title_id",
"actuate": "embed",
"autocomplete": "name",
"autodelete": False,
},
# Human Resources (assigned)
hrm_human_resource = {"link": "project_task_human_resource",
"joinby": "task_id",
"key": "human_resource_id",
"actuate": "embed",
"autocomplete": "name",
"autodelete": False
},
# Requests
req_req = {"link": "project_task_req",
"joinby": "task_id",
"key": "req_id",
"actuate": "embed",
"autocomplete": "request_number",
"autodelete": False,
},
# Time
project_time = "task_id",
                       # Comments (for imports)
project_comment = "task_id",
)
# ---------------------------------------------------------------------
# Link Tasks <-> Projects
#
tablename = "project_task_project"
define_table(tablename,
task_id(empty = False,
ondelete = "CASCADE",
),
project_id(
empty = False,
ondelete = "CASCADE",
# Override requires so that update access to the projects isn't required
requires = IS_ONE_OF(db, "project_project.id",
self.project_project_represent
)
),
*s3_meta_fields())
# ---------------------------------------------------------------------
# Link task <-> activity
#
# Tasks <> Activities
tablename = "project_task_activity"
define_table(tablename,
task_id(empty = False,
ondelete = "CASCADE",
),
self.project_activity_id(empty = False,
ondelete = "CASCADE",
),
*s3_meta_fields())
# ---------------------------------------------------------------------
# Link task <-> milestone
#
# Tasks <> Milestones
tablename = "project_task_milestone"
define_table(tablename,
task_id(empty = False,
ondelete = "CASCADE",
),
milestone_id(empty = False,
ondelete = "CASCADE",
),
*s3_meta_fields())
# ---------------------------------------------------------------------
# Link task <-> tags
#
# Tasks <> Tags
tablename = "project_task_tag"
define_table(tablename,
task_id(empty = False,
ondelete = "CASCADE",
),
tag_id(empty = False,
ondelete = "CASCADE",
),
*s3_meta_fields())
# ---------------------------------------------------------------------
# Project comment
#
# @ToDo: Attachments?
#
# Parent field allows us to:
# * easily filter for top-level threads
# * easily filter for next level of threading
# * hook a new reply into the correct location in the hierarchy
#
tablename = "project_comment"
define_table(tablename,
Field("parent", "reference project_comment",
readable = False,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "project_comment.id"
)),
),
task_id(empty = False,
ondelete = "CASCADE",
),
Field("body", "text", notnull=True,
label = T("Comment"),
),
*s3_meta_fields())
# Resource Configuration
configure(tablename,
list_fields = ["id",
"task_id",
"created_by",
"modified_on"
],
)
# ---------------------------------------------------------------------
# Project Task Roles
# - Users can assign themselves roles while working on tasks
#
tablename = "project_role"
define_table(tablename,
Field("role", length=128, notnull=True, unique=True,
label=T("Role"),
requires = IS_NOT_ONE_OF(db,
"project_role.role"),
),
s3_comments(),
*s3_meta_fields())
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Create Role"),
title_display = T("Task Role"),
title_list = T("Task Roles"),
title_update = T("Edit Role"),
label_list_button = T("List Roles"),
label_delete_button = T("Delete Role"),
msg_record_created = T("Role added"),
msg_record_modified = T("Role updated"),
msg_record_deleted = T("Role deleted"),
msg_list_empty = T("No such Role exists"))
represent = S3Represent(lookup=tablename,
fields=["role"])
role_id = S3ReusableField("role_id", "reference %s" % tablename,
ondelete = "CASCADE",
requires = IS_EMPTY_OR(IS_ONE_OF(db,
"project_role.id",
represent)),
represent = represent,
)
# ---------------------------------------------------------------------
# Project Members
# - Members for tasks in Project
#
person_id = self.pr_person_id
tablename = "project_member"
define_table(tablename,
person_id(label = T("Member"),
default = auth.s3_logged_in_person(),
widget = SQLFORM.widgets.options.widget),
role_id(label=T("Role")),
task_id(empty = False,
ondelete = "CASCADE"),
*s3_meta_fields())
# ---------------------------------------------------------------------
# Project Time
# - used to Log hours spent on a Task
#
tablename = "project_time"
define_table(tablename,
task_id(
requires = IS_ONE_OF(db, "project_task.id",
project_task_represent_w_project,
),
),
self.pr_person_id(default=auth.s3_logged_in_person(),
widget = SQLFORM.widgets.options.widget
),
s3_datetime(default="now",
past=8760, # Hours, so 1 year
future=0
),
Field("hours", "double",
label = "%s (%s)" % (T("Time"),
T("hours")),
represent=lambda v: \
IS_FLOAT_AMOUNT.represent(v, precision=2)),
Field.Method("day", project_time_day),
Field.Method("week", project_time_week),
s3_comments(),
*s3_meta_fields())
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Log Time Spent"),
title_display = T("Logged Time Details"),
title_list = T("Logged Time"),
title_update = T("Edit Logged Time"),
title_upload = T("Import Logged Time data"),
title_report = T("Project Time Report"),
label_list_button = T("List Logged Time"),
msg_record_created = T("Time Logged"),
msg_record_modified = T("Time Log Updated"),
msg_record_deleted = T("Time Log Deleted"),
msg_list_empty = T("No Time Logged")
)
if "rows" in request.get_vars and request.get_vars.rows == "project":
crud_strings[tablename].title_report = T("Project Time Report")
list_fields = ["id",
(T("Project"), "task_id$task_project.project_id"),
(T("Activity"), "task_id$task_activity.activity_id"),
"task_id",
"person_id",
"date",
"hours",
"comments",
]
filter_widgets = [
S3OptionsFilter("person_id",
#label = T("Person"),
),
S3OptionsFilter("task_id$task_project.project_id",
#label = T("Project"),
options = self.project_task_project_opts,
),
S3OptionsFilter("task_id$task_activity.activity_id",
#label = T("Activity"),
options = self.project_task_activity_opts,
hidden = True,
),
S3DateFilter("date",
#label = T("Date"),
hide_time = True,
hidden = True,
),
]
if settings.get_project_milestones():
# Use the field in this format to get the custom represent
list_fields.insert(3, (T("Milestone"), "task_id$task_milestone.milestone_id"))
filter_widgets.insert(3, S3OptionsFilter("task_id$task_milestone.milestone_id",
#label = T("Milestone"),
hidden = True,
))
report_fields = list_fields + \
[(T("Day"), "day"),
(T("Week"), "week")]
if settings.get_project_sectors():
report_fields.insert(3, (T("Sector"),
"task_id$task_project.project_id$sector_project.sector_id"))
filter_widgets.insert(1, S3OptionsFilter("task_id$task_project.project_id$sector_project.sector_id",
#label = T("Sector"),
))
report_options = Storage(rows = report_fields,
cols = report_fields,
fact = report_fields,
defaults = Storage(
rows = "task_id$task_project.project_id",
cols = "person_id",
fact = "sum(hours)",
totals = True,
),
)
configure(tablename,
filter_widgets = filter_widgets,
list_fields = list_fields,
onaccept = self.project_time_onaccept,
report_fields = ["date"],
report_options = report_options,
)
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return dict(
project_task_id = task_id,
project_task_active_statuses = project_task_active_statuses,
project_task_represent_w_project = project_task_represent_w_project,
project_task_project_opts = self.project_task_project_opts
)
# -------------------------------------------------------------------------
@staticmethod
def defaults():
""" Safe defaults for model-global names if module is disabled """
dummy = S3ReusableField("dummy_id", "integer",
readable = False,
writable = False)
return dict(project_task_id = lambda **attr: dummy("task_id"),
project_task_active_statuses = [],
)
# -------------------------------------------------------------------------
@staticmethod
def project_task_task_id(row):
""" The record ID of a task as separate column in the data table """
if hasattr(row, "project_task"):
row = row.project_task
try:
return row.id
except AttributeError:
return None
# -------------------------------------------------------------------------
@staticmethod
def project_task_project_opts():
"""
Provide the options for the Project search filter
- all Projects with Tasks
"""
db = current.db
ptable = db.project_project
ttable = db.project_task
ltable = db.project_task_project
query = (ttable.deleted != True) & \
(ltable.task_id == ttable.id) & \
(ltable.project_id == ptable.id)
rows = db(query).select(ptable.id, ptable.name)
return dict((row.id, row.name) for row in rows)
# -------------------------------------------------------------------------
@staticmethod
def project_task_activity_opts():
"""
Provide the options for the Activity search filter
- all Activities with Tasks
"""
db = current.db
atable = db.project_activity
ttable = db.project_task
ltable = db.project_task_activity
query = (ttable.deleted == False) & \
(ltable.task_id == ttable.id) & \
(ltable.activity_id == atable.id)
rows = db(query).select(atable.id, atable.name)
return dict((row.id, row.name) for row in rows)
# -------------------------------------------------------------------------
@staticmethod
def project_task_milestone_opts():
"""
Provide the options for the Milestone search filter
- all Milestones with Tasks
"""
db = current.db
mtable = db.project_milestone
ttable = db.project_task
ltable = db.project_task_milestone
query = (ttable.deleted == False) & \
(ltable.task_id == ttable.id) & \
(ltable.milestone_id == mtable.id)
rows = db(query).select(mtable.id, mtable.name)
return dict((row.id, row.name) for row in rows)
# -------------------------------------------------------------------------
@staticmethod
def project_task_realm_entity(table, record):
""" Set the task realm entity to the project's realm entity """
task_id = record.id
db = current.db
ptable = db.project_project
ltable = db.project_task_project
query = (ltable.task_id == task_id) & \
(ltable.project_id == ptable.id)
project = db(query).select(ptable.realm_entity,
limitby=(0, 1)).first()
if project:
return project.realm_entity
else:
return None
# -------------------------------------------------------------------------
@staticmethod
def project_task_onvalidation(form):
""" Task form validation """
vars = form.vars
if str(vars.status) == "3" and not vars.pe_id:
form.errors.pe_id = \
current.T("Status 'assigned' requires the %(fieldname)s to not be blank") % \
dict(fieldname=current.db.project_task.pe_id.label)
elif vars.pe_id and str(vars.status) == "2":
# Set the Status to 'Assigned' if left at default 'New'
vars.status = 3
# -------------------------------------------------------------------------
@staticmethod
def project_task_create_onaccept(form):
"""
When a Task is created:
* Process the additional fields: Project/Activity/Milestone
* create associated Link Table records
* notify assignee
"""
db = current.db
s3db = current.s3db
session = current.session
id = form.vars.id
if session.s3.incident:
# Create a link between this Task & the active Incident
etable = s3db.event_task
etable.insert(incident_id=session.s3.incident,
task_id=id)
ltp = db.project_task_project
vars = current.request.post_vars
project_id = vars.get("project_id", None)
if project_id:
# Create Link to Project
link_id = ltp.insert(task_id = id,
project_id = project_id)
activity_id = vars.get("activity_id", None)
if activity_id:
# Create Link to Activity
lta = db.project_task_activity
link_id = lta.insert(task_id = id,
activity_id = activity_id)
milestone_id = vars.get("milestone_id", None)
if milestone_id:
# Create Link to Milestone
ltable = db.project_task_milestone
link_id = ltable.insert(task_id = id,
milestone_id = milestone_id)
# Make sure the task is also linked to the project
# when created under an activity
row = db(ltp.task_id == id).select(ltp.project_id,
limitby=(0, 1)).first()
if not row:
lta = db.project_task_activity
ta = db.project_activity
query = (lta.task_id == id) & \
(lta.activity_id == ta.id)
row = db(query).select(ta.project_id,
limitby=(0, 1)).first()
if row and row.project_id:
ltp.insert(task_id=id,
project_id=row.project_id)
# Notify Assignee
task_notify(form)
# -------------------------------------------------------------------------
@staticmethod
def project_task_update_onaccept(form):
"""
* Process the additional fields: Project/Activity/Milestone
* Log changes as comments
* If the task is assigned to someone then notify them
"""
db = current.db
s3db = current.s3db
vars = form.vars
id = vars.id
record = form.record
table = db.project_task
changed = {}
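        # Build human-readable descriptions of any changed fields in the loop
        # below; they are posted as a project_comment on the Task further down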
if record: # Not True for a record merger
for var in vars:
vvar = vars[var]
rvar = record[var]
if vvar != rvar:
type = table[var].type
if type == "integer" or \
type.startswith("reference"):
if vvar:
vvar = int(vvar)
if vvar == rvar:
continue
represent = table[var].represent
if not represent:
represent = lambda o: o
if rvar:
changed[var] = "%s changed from %s to %s" % \
(table[var].label, represent(rvar), represent(vvar))
else:
changed[var] = "%s changed to %s" % \
(table[var].label, represent(vvar))
if changed:
table = db.project_comment
text = s3_auth_user_represent(current.auth.user.id)
for var in changed:
text = "%s\n%s" % (text, changed[var])
table.insert(task_id=id,
body=text)
vars = current.request.post_vars
if "project_id" in vars:
ltable = db.project_task_project
filter = (ltable.task_id == id)
project = vars.project_id
if project:
# Create the link to the Project
#ptable = db.project_project
#master = s3db.resource("project_task", id=id)
#record = db(ptable.id == project).select(ptable.id,
# limitby=(0, 1)).first()
#link = s3db.resource("project_task_project")
#link_id = link.update_link(master, record)
query = (ltable.task_id == id) & \
(ltable.project_id == project)
record = db(query).select(ltable.id, limitby=(0, 1)).first()
if record:
link_id = record.id
else:
link_id = ltable.insert(task_id = id,
project_id = project)
filter = filter & (ltable.id != link_id)
# Remove any other links
links = s3db.resource("project_task_project", filter=filter)
links.delete()
if "activity_id" in vars:
ltable = db.project_task_activity
filter = (ltable.task_id == id)
activity = vars.activity_id
            if activity:
# Create the link to the Activity
#atable = db.project_activity
#master = s3db.resource("project_task", id=id)
#record = db(atable.id == activity).select(atable.id,
# limitby=(0, 1)).first()
#link = s3db.resource("project_task_activity")
#link_id = link.update_link(master, record)
query = (ltable.task_id == id) & \
(ltable.activity_id == activity)
record = db(query).select(ltable.id, limitby=(0, 1)).first()
if record:
link_id = record.id
else:
link_id = ltable.insert(task_id = id,
activity_id = activity)
filter = filter & (ltable.id != link_id)
# Remove any other links
links = s3db.resource("project_task_activity", filter=filter)
links.delete()
if "milestone_id" in vars:
ltable = db.project_task_milestone
filter = (ltable.task_id == id)
milestone = vars.milestone_id
if milestone:
# Create the link to the Milestone
#mtable = db.project_milestone
#master = s3db.resource("project_task", id=id)
#record = db(mtable.id == milestone).select(mtable.id,
# limitby=(0, 1)).first()
#link = s3db.resource("project_task_milestone")
#link_id = link.update_link(master, record)
query = (ltable.task_id == id) & \
(ltable.milestone_id == milestone)
record = db(query).select(ltable.id, limitby=(0, 1)).first()
if record:
link_id = record.id
else:
link_id = ltable.insert(task_id = id,
milestone_id = milestone)
filter = filter & (ltable.id != link_id)
# Remove any other links
links = s3db.resource("project_task_milestone", filter=filter)
links.delete()
# Notify Assignee
task_notify(form)
# -------------------------------------------------------------------------
@staticmethod
def project_task_dispatch(r, **attr):
"""
Send a Task Dispatch notice from a Task
- if a location is supplied, this will be formatted as an OpenGeoSMS
"""
if r.representation == "html" and \
r.name == "task" and r.id and not r.component:
record = r.record
text = "%s: %s" % (record.name,
record.description)
# Encode the message as an OpenGeoSMS
msg = current.msg
message = msg.prepare_opengeosms(record.location_id,
code="ST",
map="google",
text=text)
# URL to redirect to after message sent
url = URL(c="project",
f="task",
args=r.id)
# Create the form
if record.pe_id:
opts = dict(recipient=record.pe_id)
else:
opts = dict(recipient_type="pr_person")
output = msg.compose(type="SMS",
message = message,
url = url,
**opts)
# Maintain RHeader for consistency
if "rheader" in attr:
rheader = attr["rheader"](r)
if rheader:
output["rheader"] = rheader
output["title"] = current.T("Send Task Notification")
current.response.view = "msg/compose.html"
return output
else:
raise HTTP(501, current.ERROR.BAD_METHOD)
# -------------------------------------------------------------------------
@staticmethod
def project_milestone_duplicate(item):
"""
Import item de-duplication
- Duplicate if same Name & Project
"""
data = item.data
name = data.get("name")
if not name:
# Nothing we can work with
return
table = item.table
query = (table.name.lower() == name.lower())
project_id = data.get("project_id")
if project_id:
query &= (table.project_id == project_id)
duplicate = current.db(query).select(table.id,
limitby=(0, 1)).first()
if duplicate:
item.id = duplicate.id
item.method = item.METHOD.UPDATE
# -------------------------------------------------------------------------
@staticmethod
def project_time_onaccept(form):
""" When Time is logged, update the Task & Activity """
db = current.db
titable = db.project_time
ttable = db.project_task
atable = db.project_activity
tatable = db.project_task_activity
# Find the Task
task_id = form.vars.task_id
if not task_id:
# Component Form
query = (titable.id == form.vars.id)
record = db(query).select(titable.task_id,
limitby=(0, 1)).first()
if record:
task_id = record.task_id
# Total the Hours Logged
query = (titable.deleted == False) & \
(titable.task_id == task_id)
rows = db(query).select(titable.hours)
hours = 0
for row in rows:
if row.hours:
hours += row.hours
# Update the Task
query = (ttable.id == task_id)
db(query).update(time_actual=hours)
# Find the Activity
query = (tatable.deleted == False) & \
(tatable.task_id == task_id)
activity = db(query).select(tatable.activity_id,
limitby=(0, 1)).first()
if activity:
activity_id = activity.activity_id
# Find all Tasks in this Activity
query = (ttable.deleted == False) & \
(tatable.deleted == False) & \
(tatable.task_id == ttable.id) & \
(tatable.activity_id == activity_id)
tasks = db(query).select(ttable.time_actual)
# Total the Hours Logged
hours = 0
for task in tasks:
hours += task.time_actual or 0 # Handle None
# Update the Activity
query = (atable.id == activity_id)
db(query).update(time_actual=hours)
# =============================================================================
class S3ProjectTaskHRMModel(S3Model):
"""
Project Task HRM Model
This class holds the tables used to link Tasks to Human Resources
- either individuals or Job Roles
"""
names = ("project_task_job_title",
"project_task_human_resource",
)
def model(self):
define_table = self.define_table
task_id = self.project_task_id
# ---------------------------------------------------------------------
# Link Tasks <> Human Resources
tablename = "project_task_human_resource"
define_table(tablename,
task_id(empty = False,
ondelete = "CASCADE",
),
self.hrm_human_resource_id(empty = False,
# @ToDo: Flag that there are open Tasks Assigned
ondelete = "CASCADE",
),
*s3_meta_fields())
# ---------------------------------------------------------------------
# Link Tasks <> Job Roles
tablename = "project_task_job_title"
define_table(tablename,
task_id(empty = False,
ondelete = "CASCADE",
),
self.hrm_job_title_id(empty = False,
# @ToDo: Flag that there are open Tasks Assigned
ondelete = "CASCADE",
),
*s3_meta_fields())
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return {}
# =============================================================================
class S3ProjectTaskIReportModel(S3Model):
"""
Project Task IReport Model
This class holds the table used to link Tasks with Incident Reports.
@ToDo: Deprecate as we link to Incidents instead: S3EventTaskModel
"""
names = ("project_task_ireport",)
def model(self):
# Link Tasks <-> Incident Reports
#
tablename = "project_task_ireport"
self.define_table(tablename,
self.project_task_id(empty = False,
ondelete = "CASCADE",
),
self.irs_ireport_id(empty = False,
ondelete = "CASCADE",
),
*s3_meta_fields())
self.configure(tablename,
onaccept=self.task_ireport_onaccept)
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return {}
# -------------------------------------------------------------------------
@staticmethod
def task_ireport_onaccept(form):
"""
When a Task is linked to an IReport, then populate the location_id
"""
vars = form.vars
ireport_id = vars.ireport_id
task_id = vars.task_id
db = current.db
# Check if we already have a Location for the Task
table = db.project_task
query = (table.id == task_id)
record = db(query).select(table.location_id,
limitby=(0, 1)).first()
if not record or record.location_id:
return
# Find the Incident Location
itable = db.irs_ireport
query = (itable.id == ireport_id)
record = db(query).select(itable.location_id,
limitby=(0, 1)).first()
if not record or not record.location_id:
return
location_id = record.location_id
# Update the Task
query = (table.id == task_id)
db(query).update(location_id=location_id)
# =============================================================================
def multi_theme_percentage_represent(id):
"""
Representation for Theme Percentages
for multiple=True options
"""
if not id:
return current.messages["NONE"]
s3db = current.s3db
table = s3db.project_theme_percentage
ttable = s3db.project_theme
def represent_row(row):
return "%s (%s%s)" % (row.project_theme.name,
row.project_theme_percentage.percentage,
"%")
if isinstance(id, (list, tuple)):
query = (table.id.belongs(id)) & \
(ttable.id == table.theme_id)
rows = current.db(query).select(table.percentage,
ttable.name)
repr = ", ".join(represent_row(row) for row in rows)
return repr
else:
query = (table.id == id) & \
(ttable.id == table.theme_id)
row = current.db(query).select(table.percentage,
ttable.name).first()
try:
return represent_row(row)
except:
return current.messages.UNKNOWN_OPT
# =============================================================================
class project_LocationRepresent(S3Represent):
""" Representation of Project Locations """
def __init__(self,
translate=False,
show_link=False,
multiple=False,
):
settings = current.deployment_settings
if settings.get_project_community():
# Community is the primary resource
self.community = True
else:
# Location is just a way to display Projects on a map
self.community = False
if settings.get_gis_countries() == 1:
self.multi_country = False
else:
self.multi_country = True
self.use_codes = settings.get_project_codes()
self.lookup_rows = self.custom_lookup_rows
super(project_LocationRepresent,
self).__init__(lookup="project_location",
show_link=show_link,
translate=translate,
multiple=multiple)
# -------------------------------------------------------------------------
def custom_lookup_rows(self, key, values, fields=None):
"""
            Custom lookup method for project_location rows, does a
            join with the projects and locations. Parameters
            key and fields are not used, but are kept for API
            compatibility reasons.
@param values: the project_location IDs
"""
db = current.db
ltable = current.s3db.project_location
gtable = db.gis_location
fields = [ltable.id, # pkey is needed for the cache
gtable.name,
gtable.level,
gtable.L0,
gtable.L1,
gtable.L2,
gtable.L3,
gtable.L4,
gtable.L5,
]
if len(values) == 1:
query = (ltable.id == values[0]) & \
(ltable.location_id == gtable.id)
limitby = (0, 1)
else:
query = (ltable.id.belongs(values)) & \
(ltable.location_id == gtable.id)
limitby = None
if not self.community:
ptable = db.project_project
query &= (ltable.project_id == ptable.id)
fields.append(ptable.name)
if self.use_codes:
fields.append(ptable.code)
rows = db(query).select(*fields,
limitby=limitby)
self.queries += 1
return rows
# -------------------------------------------------------------------------
def represent_row(self, row):
"""
Represent a single Row
@param row: the joined Row
"""
community = self.community
if not self.community:
prow = row["project_project"]
row = row["gis_location"]
name = row.name
level = row.level
if level == "L0":
location = name
elif name:
locations = [name]
lappend = locations.append
matched = False
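            # Skip the first (lowest) admin level that repeats the location's own
            # name (the name already leads the list); keep higher levels that share it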
L5 = row.L5
if L5:
if L5 == name:
matched = True
else:
lappend(L5)
L4 = row.L4
if L4:
if L4 == name:
if matched:
lappend(L4)
matched = True
else:
lappend(L4)
L3 = row.L3
if L3:
if L3 == name:
if matched:
lappend(L3)
matched = True
else:
lappend(L3)
L2 = row.L2
if L2:
if L2 == name:
if matched:
lappend(L2)
matched = True
else:
lappend(L2)
L1 = row.L1
if L1:
if L1 == name:
if matched:
lappend(L1)
matched = True
else:
lappend(L1)
if self.multi_country:
L0 = row.L0
if L0:
if L0 == name:
if matched:
lappend(L0)
matched = True
else:
lappend(L0)
location = ", ".join(locations)
else:
locations = [row[level] for level in ("L5", "L4", "L3", "L2", "L1") if row[level]]
if self.multi_country:
L0 = row.L0
if L0:
locations.append(L0)
location = ", ".join(locations)
if community:
return s3_unicode(location)
else:
if self.use_codes and prow.code:
project = "%s: %s" % (prow.code, prow.name)
else:
project = prow.name
name = "%s (%s)" % (project, location)
return s3_unicode(name)
# =============================================================================
def task_notify(form):
"""
If the task is assigned to someone then notify them
"""
vars = form.vars
pe_id = vars.pe_id
if not pe_id:
return
user = current.auth.user
if user and user.pe_id == pe_id:
# Don't notify the user when they assign themselves tasks
return
if int(vars.status) not in current.response.s3.project_task_active_statuses:
# No need to notify about closed tasks
return
if form.record is None or (int(pe_id) != form.record.pe_id):
# Assignee has changed
settings = current.deployment_settings
if settings.has_module("msg"):
# Notify assignee
subject = "%s: Task assigned to you" % settings.get_system_name_short()
url = "%s%s" % (settings.get_base_public_url(),
URL(c="project", f="task", args=vars.id))
priority = current.s3db.project_task.priority.represent(int(vars.priority))
message = "You have been assigned a Task:\n\n%s\n\n%s\n\n%s\n\n%s" % \
(url,
"%s priority" % priority,
vars.name,
vars.description or "")
current.msg.send_by_pe_id(pe_id, subject, message)
return
# =============================================================================
class project_TaskRepresent(S3Represent):
""" Representation of project tasks """
def __init__(self,
show_link=False,
show_project=False,
project_first=True):
"""
Constructor
@param show_link: render representation as link to the task
@param show_project: show the project name in the representation
@param project_first: show the project name before the task name
"""
task_url = URL(c="project", f="task", args=["[id]"])
super(project_TaskRepresent, self).__init__(lookup = "project_task",
show_link = show_link,
linkto = task_url,
)
self.show_project = show_project
if show_project:
self.project_represent = S3Represent(lookup = "project_project")
self.project_first = project_first
# -------------------------------------------------------------------------
def lookup_rows(self, key, values, fields=[]):
"""
Custom rows lookup
@param key: the key Field
@param values: the values
@param fields: unused (retained for API compatibility)
"""
s3db = current.s3db
ttable = s3db.project_task
fields = [ttable.id, ttable.name]
show_project = self.show_project
if show_project:
ltable = s3db.project_task_project
left = ltable.on(ltable.task_id == ttable.id)
fields.append(ltable.project_id)
else:
left = None
if len(values) == 1:
query = (key == values[0])
else:
query = key.belongs(values)
rows = current.db(query).select(left = left, *fields)
self.queries += 1
if show_project and rows:
# Bulk-represent the project_ids
project_ids = [row.project_task_project.project_id
for row in rows]
if project_ids:
self.project_represent.bulk(project_ids)
return rows
# -------------------------------------------------------------------------
def represent_row(self, row):
"""
Represent a row
@param row: the Row
"""
output = row["project_task.name"]
if self.show_project:
project_id = row["project_task_project.project_id"]
if self.project_first:
if project_id:
strfmt = "%(project)s: %(task)s"
else:
strfmt = "- %(task)s"
else:
if project_id:
strfmt = "%(task)s (%(project)s)"
else:
strfmt = "%(task)s"
output = strfmt % {"task": s3_unicode(output),
"project": self.project_represent(project_id),
}
return output
# =============================================================================
class project_ActivityRepresent(S3Represent):
""" Representation of Project Activities """
def __init__(self,
translate=False,
show_link=False,
multiple=False):
if current.deployment_settings.get_project_projects():
# Need a custom lookup
self.code = True
self.lookup_rows = self.custom_lookup_rows
fields = ["project_activity.name",
"project_project.code",
]
else:
# Can use standard lookup of fields
self.code = False
fields = ["name"]
super(project_ActivityRepresent,
self).__init__(lookup="project_activity",
fields=fields,
show_link=show_link,
translate=translate,
multiple=multiple)
# -------------------------------------------------------------------------
def custom_lookup_rows(self, key, values, fields=[]):
"""
Custom lookup method for activity rows, does a
left join with the parent project. Parameters
key and fields are not used, but are kept for API
compatibility reasons.
@param values: the activity IDs
"""
db = current.db
s3db = current.s3db
atable = s3db.project_activity
ptable = s3db.project_project
left = ptable.on(ptable.id == atable.project_id)
qty = len(values)
if qty == 1:
query = (atable.id == values[0])
limitby = (0, 1)
else:
query = (atable.id.belongs(values))
limitby = (0, qty)
rows = db(query).select(atable.id,
atable.name,
ptable.code,
left=left,
limitby=limitby)
self.queries += 1
return rows
# -------------------------------------------------------------------------
def represent_row(self, row):
"""
Represent a single Row
@param row: the project_activity Row
"""
if self.code:
# Custom Row (with the project left-joined)
name = row["project_activity.name"]
code = row["project_project.code"]
if not name:
return row["project_activity.id"]
else:
# Standard row (from fields)
name = row["name"]
if not name:
return row["id"]
if self.code and code:
name = "%s > %s" % (code, name)
return s3_unicode(name)
# =============================================================================
def project_activity_year_options():
"""
        Returns a dict of the options for the year virtual field
        used by the search widget
        - the orderby is needed for PostgreSQL
@ToDo: Migrate to stats_year_options()
"""
db = current.db
table = current.s3db.project_activity
query = (table.deleted == False)
min_field = table.date.min()
start_date_min = db(query).select(min_field,
orderby=min_field,
limitby=(0, 1)
).first()[min_field]
if start_date_min:
start_year = start_date_min.year
else:
start_year = None
max_field = table.end_date.max()
end_date_max = db(query).select(max_field,
orderby=max_field,
limitby=(0, 1)
).first()[max_field]
if end_date_max:
end_year = end_date_max.year
else:
end_year = None
    if not start_year or not end_year:
        # Return whichever single year is available
        year = start_year or end_year
        return {year: year} if year else {}
years = {}
for year in xrange(start_year, end_year + 1):
years[year] = year
return years
# =============================================================================
class S3ProjectThemeVirtualFields:
""" Virtual fields for the project table """
def themes(self):
"""
Themes associated with this Project
"""
try:
project_id = self.project_project.id
except AttributeError:
return ""
s3db = current.s3db
ptable = s3db.project_project
ttable = s3db.project_theme
ltable = s3db.project_theme_percentage
query = (ltable.deleted != True) & \
(ltable.project_id == project_id) & \
(ltable.theme_id == ttable.id)
themes = current.db(query).select(ttable.name,
ltable.percentage)
if not themes:
return current.messages["NONE"]
represent = ""
for theme in themes:
name = theme.project_theme.name
percentage = theme.project_theme_percentage.percentage
if represent:
represent = "%s, %s (%s%s)" % (represent,
name,
percentage,
"%")
else:
represent = "%s (%s%s)" % (name, percentage, "%")
return represent
# =============================================================================
# project_time virtual fields
#
def project_time_day(row):
"""
Virtual field for project_time - abbreviated string format for
date, allows grouping per day instead of the individual datetime,
used for project time report.
Requires "date" to be in the additional report_fields
@param row: the Row
"""
try:
thisdate = row["project_time.date"]
except AttributeError:
return current.messages["NONE"]
if not thisdate:
return current.messages["NONE"]
now = current.request.utcnow
week = datetime.timedelta(days=7)
#if thisdate < (now - week):
# Ignore data older than the last week
# - should already be filtered in controller anyway
# return default
return thisdate.date().strftime("%d %B %y")
# =============================================================================
def project_time_week(row):
"""
Virtual field for project_time - returns the date of the Monday
(=first day of the week) of this entry, used for project time report.
Requires "date" to be in the additional report_fields
@param row: the Row
"""
try:
thisdate = row["project_time.date"]
except AttributeError:
return current.messages["NONE"]
if not thisdate:
return current.messages["NONE"]
day = thisdate.date()
monday = day - datetime.timedelta(days=day.weekday())
return monday
# =============================================================================
def project_ckeditor():
""" Load the Project Comments JS """
s3 = current.response.s3
ckeditor = URL(c="static", f="ckeditor", args="ckeditor.js")
s3.scripts.append(ckeditor)
adapter = URL(c="static", f="ckeditor", args=["adapters", "jquery.js"])
s3.scripts.append(adapter)
# Toolbar options: http://docs.cksource.com/CKEditor_3.x/Developers_Guide/Toolbar
# @ToDo: Move to Static
js = "".join((
'''i18n.reply="''', str(current.T("Reply")), '''"
var img_path=S3.Ap.concat('/static/img/jCollapsible/')
var ck_config={toolbar:[['Bold','Italic','-','NumberedList','BulletedList','-','Link','Unlink','-','Smiley','-','Source','Maximize']],toolbarCanCollapse:false,removePlugins:'elementspath'}
function comment_reply(id){
$('#project_comment_task_id__row').hide()
$('#project_comment_task_id__row1').hide()
$('#comment-title').html(i18n.reply)
$('#project_comment_body').ckeditorGet().destroy()
$('#project_comment_body').ckeditor(ck_config)
$('#comment-form').insertAfter($('#comment-'+id))
$('#project_comment_parent').val(id)
var task_id = $('#comment-'+id).attr('task_id')
$('#project_comment_task_id').val(task_id)
}'''))
s3.js_global.append(js)
# =============================================================================
def project_rheader(r):
""" Project Resource Headers - used in Project & Budget modules """
if r.representation != "html":
# RHeaders only used in interactive views
return None
# Need to use this as otherwise demographic_data?viewing=project_location.x
# doesn't have an rheader
tablename, record = s3_rheader_resource(r)
if not record:
return None
s3db = current.s3db
table = s3db.table(tablename)
resourcename = r.name
T = current.T
#auth = current.auth
settings = current.deployment_settings
attachments_label = settings.get_ui_label_attachments()
if resourcename == "project":
mode_3w = settings.get_project_mode_3w()
mode_task = settings.get_project_mode_task()
details_tab = settings.get_project_details_tab()
indicators = settings.get_project_indicators()
# Tabs
#ADMIN = current.session.s3.system_roles.ADMIN
#admin = auth.s3_has_role(ADMIN)
#staff = auth.s3_has_role("STAFF")
#staff = True
tabs = [(T("Basic Details"), None)]
append = tabs.append
if settings.get_project_goals():
append((T("Goals"), "goal"))
if settings.get_project_outcomes():
append((T("Outcomes"), "outcome"))
outputs = settings.get_project_outputs()
if outputs and outputs != "inline":
append((T("Outputs"), "output"))
if indicators:
append((T("Indicators"), "indicator"))
append((T("Indicator Data"), "indicator_data"))
if settings.get_project_multiple_organisations() and not details_tab:
append((T("Organizations"), "organisation"))
if settings.get_project_community() and not details_tab:
append((T("Communities"), "location"))
elif not mode_task and not details_tab:
append((T("Locations"), "location"))
if settings.get_project_theme_percentages():
append((T("Themes"), "theme"))
if mode_3w and not details_tab:
append((T("Beneficiaries"), "beneficiary"))
if settings.get_project_milestones():
append((T("Milestones"), "milestone"))
if settings.get_project_activities():
append((T("Activities"), "activity"))
if mode_task:
append((T("Tasks"), "task"))
if record.calendar:
append((T("Calendar"), "timeline"))
if settings.get_project_budget_monitoring():
append((T("Budget Monitoring"), "monitoring"))
elif settings.get_project_multiple_budgets():
append((T("Annual Budgets"), "annual_budget"))
if details_tab:
append((T("Details"), "details"))
else:
if mode_3w:
append((T("Documents"), "document"))
else:
append((attachments_label, "document"))
if settings.get_hrm_show_staff():
STAFF = settings.get_hrm_staff_label()
if not details_tab:
#append((STAFF, "human_resource", dict(group="staff")))
append((STAFF, "human_resource"))
if current.auth.s3_has_permission("create", "project_human_resource"):
append((T("Assign %(staff)s") % dict(staff=STAFF), "assign"))
#if settings.has_module("vol"):
# append((T("Volunteers"), "human_resource", dict(group="volunteer")))
rheader_fields = [["code", "name"],
["organisation_id"],
["start_date", "end_date"]
]
if indicators:
rheader_fields.append(["current_status_by_indicators", "overall_status_by_indicators"])
# @ToDo: Either get S3ResourceHeader to support selectors or else rewrite manually
#if settings.get_project_budget_monitoring():
# rheader_fields.append(["budget.total_budget"])
rheader = S3ResourceHeader(rheader_fields, tabs)(r)
elif resourcename in ("location", "demographic_data"):
tabs = [(T("Details"), None),
(T("Beneficiaries"), "beneficiary"),
(T("Demographics"), "demographic_data/"),
(T("Contact People"), "contact"),
]
rheader_fields = []
if record.project_id is not None:
rheader_fields.append(["project_id"])
rheader_fields.append(["location_id"])
rheader = S3ResourceHeader(rheader_fields, tabs)(r,
record = record,
table = table)
elif resourcename == "framework":
tabs = [(T("Details"), None),
(T("Organizations"), "organisation"),
(T("Documents"), "document")]
rheader_fields = [["name"]]
rheader = S3ResourceHeader(rheader_fields, tabs)(r)
elif resourcename == "activity":
tabs = [(T("Details"), None),
(T("Contact People"), "contact")]
if settings.get_project_mode_task():
tabs.append((T("Tasks"), "task"))
tabs.append((attachments_label, "document"))
else:
tabs.append((T("Documents"), "document"))
rheader_fields = []
if record.project_id is not None:
rheader_fields.append(["project_id"])
rheader_fields.append(["name"])
rheader_fields.append(["location_id"])
rheader = S3ResourceHeader(rheader_fields, tabs)(r)
elif resourcename == "task":
# Tabs
tabs = [(T("Details"), None)]
append = tabs.append
append((attachments_label, "document"))
if settings.has_module("msg"):
append((T("Notify"), "dispatch"))
#(T("Roles"), "job_title"),
#(T("Assignments"), "human_resource"),
#(T("Requests"), "req")
rheader_tabs = s3_rheader_tabs(r, tabs)
# RHeader
db = current.db
ltable = s3db.project_task_project
ptable = db.project_project
query = (ltable.deleted == False) & \
(ltable.task_id == r.id) & \
(ltable.project_id == ptable.id)
row = db(query).select(ptable.id,
ptable.code,
ptable.name,
limitby=(0, 1)).first()
if row:
project = s3db.project_project_represent(None, row)
project = TR(TH("%s: " % T("Project")),
project,
)
else:
project = ""
atable = s3db.project_activity
ltable = s3db.project_task_activity
query = (ltable.deleted == False) & \
(ltable.task_id == r.id) & \
(ltable.activity_id == atable.id)
activity = db(query).select(atable.name,
limitby=(0, 1)).first()
if activity:
activity = TR(TH("%s: " % T("Activity")),
activity.name
)
else:
activity = ""
if record.description:
description = TR(TH("%s: " % table.description.label),
record.description
)
else:
description = ""
if record.site_id:
facility = TR(TH("%s: " % table.site_id.label),
table.site_id.represent(record.site_id),
)
else:
facility = ""
if record.location_id:
location = TR(TH("%s: " % table.location_id.label),
table.location_id.represent(record.location_id),
)
else:
location = ""
if record.created_by:
creator = TR(TH("%s: " % T("Created By")),
s3_auth_user_represent(record.created_by),
)
else:
creator = ""
if record.time_estimated:
time_estimated = TR(TH("%s: " % table.time_estimated.label),
record.time_estimated
)
else:
time_estimated = ""
if record.time_actual:
time_actual = TR(TH("%s: " % table.time_actual.label),
record.time_actual
)
else:
time_actual = ""
rheader = DIV(TABLE(project,
activity,
TR(TH("%s: " % table.name.label),
record.name,
),
description,
facility,
location,
creator,
time_estimated,
time_actual,
#comments,
), rheader_tabs)
return rheader
# =============================================================================
def project_task_controller():
"""
Tasks Controller, defined in the model for use from
multiple controllers for unified menus
"""
T = current.T
s3db = current.s3db
auth = current.auth
s3 = current.response.s3
get_vars = current.request.get_vars
# Pre-process
def prep(r):
tablename = "project_task"
table = s3db.project_task
statuses = s3.project_task_active_statuses
crud_strings = s3.crud_strings[tablename]
if r.record:
if r.interactive:
# Put the Comments in the RFooter
project_ckeditor()
s3.rfooter = LOAD("project", "comments.load",
args=[r.id],
ajax=True)
if r.method == "datalist":
# Set list_fields for renderer (project_task_list_layout)
list_fields = ["name",
"description",
"location_id",
"date_due",
"pe_id",
"status",
#"organisation_id$logo",
"modified_by",
]
if current.deployment_settings.get_project_projects():
list_fields.insert(5, (T("Project"), "task_project.project_id"))
s3db.configure("project_task",
list_fields = list_fields,
)
elif r.method in ("create", "create.popup"):
project_id = r.get_vars.get("task_project.project_id", None)
if project_id:
# Coming from a profile page
s3db.project_task_project.project_id.default = project_id
# Can't do this for an inline form
#field.readable = field.writable = False
elif "mine" in get_vars:
# Show the Open Tasks for this User
if auth.user:
pe_id = auth.user.pe_id
query = (table.pe_id == pe_id) & \
(table.status.belongs(statuses))
r.resource.add_filter(query)
crud_strings.title_list = T("My Open Tasks")
crud_strings.msg_list_empty = T("No Tasks Assigned")
s3db.configure(tablename,
copyable = False,
listadd = False,
)
# No need for assignee (always us) or status (always "assigned"
# or "reopened") in list fields:
list_fields = s3db.get_config(tablename, "list_fields")
if list_fields:
list_fields[:] = (fn for fn in list_fields
if fn not in ("pe_id", "status"))
elif "project" in get_vars:
# Show Open Tasks for this Project
project = get_vars.project
ptable = s3db.project_project
try:
name = current.db(ptable.id == project).select(ptable.name,
limitby=(0, 1)
).first().name
except:
current.session.error = T("Project not Found")
redirect(URL(args=None, vars=None))
query = (FS("task_id:project_task_project.project_id") == project) & \
(FS("status").belongs(statuses))
r.resource.add_filter(query)
crud_strings.title_list = T("Open Tasks for %(project)s") % dict(project=name)
crud_strings.msg_list_empty = T("No Open Tasks for %(project)s") % dict(project=name)
# Add Activity
list_fields = s3db.get_config(tablename,
"list_fields")
try:
# Hide the project column since we know that already
list_fields.remove((T("Project"), "task_project.project_id"))
except ValueError:
# Already removed
pass
s3db.configure(tablename,
copyable = False,
deletable = False,
# Block Add until we get the injectable component lookups
insertable = False,
list_fields = list_fields,
)
elif "open" in get_vars:
# Show Only Open Tasks
crud_strings.title_list = T("All Open Tasks")
r.resource.add_filter(table.status.belongs(statuses))
if r.component:
if r.component_name == "req":
if current.deployment_settings.has_module("hrm"):
r.component.table.type.default = 3
if r.method != "update" and r.method != "read":
# Hide fields which don't make sense in a Create form
s3db.req_create_form_mods()
elif r.component_name == "human_resource":
r.component.table.type.default = 2
else:
if not auth.s3_has_role("STAFF"):
# Hide fields to avoid confusion (both of inputters & recipients)
table = r.table
field = table.time_actual
field.readable = field.writable = False
return True
s3.prep = prep
# Post-process
def postp(r, output):
if r.interactive:
if not r.component and r.method != "import":
# Maintain vars: why?
update_url = URL(args=["[id]"], vars=get_vars)
S3CRUD.action_buttons(r, update_url=update_url)
return output
s3.postp = postp
if "mine" in get_vars or "project" in get_vars:
# Show no filters in pre-filtered views
hide_filter = True
else:
hide_filter = None
return current.rest_controller("project", "task",
hide_filter = hide_filter,
rheader = s3db.project_rheader,
)
# =============================================================================
def project_theme_help_fields(options):
"""
Provide the tooltips for the Theme filter
@param options: the options to generate tooltips for, from
S3GroupedOptionsWidget: list of tuples (key, represent)
"""
table = current.s3db.project_theme
keys = dict(options).keys()
rows = current.db(table.id.belongs(keys)).select(table.id,
table.comments)
T = current.T
translated = lambda string: T(string) if string else ""
tooltips = {}
for row in rows:
tooltips[row.id] = translated(row.comments)
return tooltips
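# Illustrative call (assumed data): given options [(1, "Health"), (2, "WASH")] from the filter widget,
# this returns {1: <translated comments of theme 1>, 2: <translated comments of theme 2>}.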
# =============================================================================
def project_hazard_help_fields(options):
"""
Provide the tooltips for the Hazard filter
@param options: the options to generate tooltips for, from
S3GroupedOptionsWidget: list of tuples (key, represent)
"""
table = current.s3db.project_hazard
keys = dict(options).keys()
rows = current.db(table.id.belongs(keys)).select(table.id,
table.comments)
T = current.T
translated = lambda string: T(string) if string else ""
tooltips = {}
for row in rows:
tooltips[row.id] = translated(row.comments)
return tooltips
# =============================================================================
def project_hfa_opts():
"""
Provide the options for the HFA filter
        HFA: Hyogo Framework for Action
"""
T = current.T
return {
1: T("HFA1: Ensure that disaster risk reduction is a national and a local priority with a strong institutional basis for implementation."),
2: T("HFA2: Identify, assess and monitor disaster risks and enhance early warning."),
3: T("HFA3: Use knowledge, innovation and education to build a culture of safety and resilience at all levels."),
4: T("HFA4: Reduce the underlying risk factors."),
5: T("HFA5: Strengthen disaster preparedness for effective response at all levels."),
}
# =============================================================================
def project_jnap_opts():
"""
Provide the options for the JNAP filter (currently unused)
JNAP (Joint National Action Plan for Disaster Risk Management
and Climate Change Adaptation): applies to Cook Islands only
"""
T = current.T
return {
1: T("JNAP-1: Strategic Area 1: Governance"),
2: T("JNAP-2: Strategic Area 2: Monitoring"),
3: T("JNAP-3: Strategic Area 3: Disaster Management"),
4: T("JNAP-4: Strategic Area 4: Risk Reduction and Climate Change Adaptation"),
}
# =============================================================================
def project_pifacc_opts():
"""
Provide the options for the PIFACC filter (currently unused)
PIFACC (Pacific Islands Framework for Action on Climate Change):
applies to Pacific countries only
"""
T = current.T
return {
1: T("PIFACC-1: Implementing Tangible, On-Ground Adaptation Measures"),
2: T("PIFACC-2: Governance and Decision Making"),
3: T("PIFACC-3: Improving our understanding of climate change"),
4: T("PIFACC-4: Education, Training and Awareness"),
5: T("PIFACC-5: Mitigation of Global Greenhouse Gas Emissions"),
6: T("PIFACC-6: Partnerships and Cooperation"),
}
# =============================================================================
def project_rfa_opts():
"""
Provide the options for the RFA filter
RFA: applies to Pacific countries only
"""
T = current.T
return {
1: T("RFA1: Governance-Organisational, Institutional, Policy and Decision Making Framework"),
2: T("RFA2: Knowledge, Information, Public Awareness and Education"),
3: T("RFA3: Analysis and Evaluation of Hazards, Vulnerabilities and Elements at Risk"),
4: T("RFA4: Planning for Effective Preparedness, Response and Recovery"),
5: T("RFA5: Effective, Integrated and People-Focused Early Warning Systems"),
6: T("RFA6: Reduction of Underlying Risk Factors"),
}
# =============================================================================
def project_project_filters(org_label):
"""
Filter widgets for project_project
@param org_label: the label to use for organisation_id
"""
T = current.T
settings = current.deployment_settings
filter_widgets = [
S3TextFilter(["name",
"code",
"description",
],
label = T("Search"),
comment = T("Search for a Project by name, code, or description."),
),
S3OptionsFilter("status_id",
label = T("Status"),
cols = 4,
),
S3OptionsFilter("organisation_id",
label = org_label,
# Can be unhidden in customise_xx_resource if there is a need to use a default_filter
hidden = True,
),
S3LocationFilter("location.location_id",
# Default should introspect
#levels = ("L0", "L1", "L2"),
hidden = True,
)
]
append_filter = filter_widgets.append
if settings.get_project_programmes():
append_filter(
S3OptionsFilter("programme_project.programme_id",
label = T("Programme"),
hidden = True,
)
)
if settings.get_project_sectors():
if settings.get_ui_label_cluster():
sector = T("Cluster")
else:
sector = T("Sector")
append_filter(
S3OptionsFilter("sector_project.sector_id",
label = sector,
location_filter = True,
none = True,
hidden = True,
)
)
mode_drr = settings.get_project_mode_drr()
if mode_drr:
append_filter(
S3OptionsFilter("hazard_project.hazard_id",
label = T("Hazard"),
help_field = project_hazard_help_fields,
cols = 4,
hidden = True,
)
)
if settings.get_project_mode_3w():
append_filter(
S3OptionsFilter("theme_project.theme_id",
label = T("Theme"),
help_field = project_theme_help_fields,
cols = 4,
hidden = True,
)
)
if mode_drr:
hfa_opts = project_hfa_opts()
options = dict((key, "HFA %s" % key) for key in hfa_opts)
#options[None] = current.messages["NONE"] # to search NO HFA
append_filter(
S3OptionsFilter("drr.hfa",
label = T("HFA"),
options = options,
help_field = hfa_opts,
cols = 5,
hidden = True,
)
)
if settings.get_project_multiple_organisations():
append_filter(
S3OptionsFilter("partner.organisation_id",
label = T("Partners"),
hidden = True,
)
)
append_filter(
S3OptionsFilter("donor.organisation_id",
label = T("Donors"),
hidden = True,
)
)
return filter_widgets
# =============================================================================
def project_project_list_layout(list_id, item_id, resource, rfields, record,
icon="tasks"):
"""
Default dataList item renderer for Projects on Profile pages
@param list_id: the HTML ID of the list
@param item_id: the HTML ID of the item
@param resource: the S3Resource to render
@param rfields: the S3ResourceFields to render
@param record: the record as dict
"""
record_id = record["project_project.id"]
item_class = "thumbnail"
raw = record._row
author = record["project_project.modified_by"]
date = record["project_project.modified_on"]
name = record["project_project.name"]
description = record["project_project.description"]
start_date = record["project_project.start_date"]
organisation = record["project_project.organisation_id"]
organisation_id = raw["project_project.organisation_id"]
location = record["project_location.location_id"]
location_id = raw["project_location.location_id"]
comments = raw["project_project.comments"]
org_url = URL(c="org", f="organisation", args=[organisation_id, "profile"])
org_logo = raw["org_organisation.logo"]
if org_logo:
org_logo = A(IMG(_src=URL(c="default", f="download", args=[org_logo]),
_class="media-object",
),
_href=org_url,
_class="pull-left",
)
else:
# @ToDo: use a dummy logo image
org_logo = A(IMG(_class="media-object"),
_href=org_url,
_class="pull-left",
)
# Edit Bar
# @ToDo: Consider using S3NavigationItem to hide the auth-related parts
permit = current.auth.s3_has_permission
table = current.db.project_project
if permit("update", table, record_id=record_id):
vars = {"refresh": list_id,
"record": record_id,
}
edit_btn = A(ICON("edit"),
_href=URL(c="project", f="project",
args=[record_id, "update.popup"]
),
_class="s3_modal",
_title=current.response.s3.crud_strings.project_project.title_update,
)
else:
edit_btn = ""
if permit("delete", table, record_id=record_id):
delete_btn = A(ICON("delete"),
_class="dl-item-delete",
_title=current.response.s3.crud_strings.project_project.label_delete_button,
)
else:
delete_btn = ""
edit_bar = DIV(edit_btn,
delete_btn,
_class="edit-bar fright",
)
# Render the item
item = DIV(DIV(ICON(icon),
SPAN(A(name,
_href = URL(c="project", f="project",
args=[record_id, "profile"])),
_class="card-title"),
SPAN(location, _class="location-title"),
SPAN(start_date, _class="date-title"),
edit_bar,
_class="card-header",
),
DIV(org_logo,
DIV(DIV((description or ""),
DIV(author or "",
" - ",
A(organisation,
_href=org_url,
_class="card-organisation",
),
_class="card-person",
),
_class="media",
),
_class="media-body",
),
_class="media",
),
#docs,
_class=item_class,
_id=item_id,
)
return item
# =============================================================================
def project_task_list_layout(list_id, item_id, resource, rfields, record,
icon="tasks"):
"""
Default dataList item renderer for Tasks on Profile pages
@param list_id: the HTML ID of the list
@param item_id: the HTML ID of the item
@param resource: the S3Resource to render
@param rfields: the S3ResourceFields to render
@param record: the record as dict
"""
record_id = record["project_task.id"]
item_class = "thumbnail"
raw = record._row
author = record["project_task.modified_by"]
date = record["project_task.modified_on"]
name = record["project_task.name"]
assigned_to = record["project_task.pe_id"] or ""
description = record["project_task.description"]
date_due = record["project_task.date_due"]
source_url = raw["project_task.source_url"]
status = raw["project_task.status"]
priority = raw["project_task.priority"]
project_id = raw["project_task_project.project_id"]
if project_id:
project = record["project_task_project.project_id"]
project = SPAN(A(project,
_href = URL(c="project", f="project",
args=[project_id, "profile"])
),
" > ",
_class="task_project_title"
)
else:
project = ""
if priority in (1, 2):
# Urgent / High
priority_icon = DIV(ICON("exclamation"),
_class="task_priority")
elif priority == 4:
# Low
priority_icon = DIV(ICON("arrow-down"),
_class="task_priority")
else:
priority_icon = ""
# @ToDo: Support more than just the Wrike/MCOP statuses
status_icon_colour = {2: "#AFC1E5",
6: "#C8D571",
7: "#CEC1FF",
12: "#C6C6C6",
}
active_statuses = current.s3db.project_task_active_statuses
status_icon = DIV(ICON("active" if status in active_statuses else "inactive"),
_class="task_status",
_style="background-color:%s" % (status_icon_colour.get(status, "none"))
)
location = record["project_task.location_id"]
location_id = raw["project_task.location_id"]
comments = raw["project_task.comments"]
org_logo = ""
#org_url = URL(c="org", f="organisation", args=[organisation_id, "profile"])
#org_logo = raw["org_organisation.logo"]
#if org_logo:
# org_logo = A(IMG(_src=URL(c="default", f="download", args=[org_logo]),
# _class="media-object",
# ),
# _href=org_url,
# _class="pull-left",
# )
#else:
# # @ToDo: use a dummy logo image
# org_logo = A(IMG(_class="media-object"),
# _href=org_url,
# _class="pull-left",
# )
# Edit Bar
# @ToDo: Consider using S3NavigationItem to hide the auth-related parts
permit = current.auth.s3_has_permission
table = current.db.project_task
if permit("update", table, record_id=record_id):
edit_btn = A(ICON("edit"),
_href=URL(c="project", f="task",
args=[record_id, "update.popup"],
vars={"refresh": list_id,
"record": record_id},
),
_class="s3_modal",
_title=current.response.s3.crud_strings.project_task.title_update,
)
else:
edit_btn = ""
if permit("delete", table, record_id=record_id):
delete_btn = A(ICON("delete"),
_class="dl-item-delete",
_title=current.response.s3.crud_strings.project_task.label_delete_button,
)
else:
delete_btn = ""
if source_url:
source_btn = A(ICON("link"),
_title=source_url,
_href=source_url,
_target="_blank"
)
else:
source_btn = ""
edit_bar = DIV(edit_btn,
delete_btn,
source_btn,
_class="edit-bar fright",
)
# Render the item
item = DIV(DIV(ICON(icon),
SPAN(location, _class="location-title"),
SPAN(date_due, _class="date-title"),
edit_bar,
_class="card-header",
),
DIV(org_logo,
priority_icon,
DIV(project,
name, _class="card-title task_priority"),
status_icon,
DIV(DIV((description or ""),
DIV(author,
" - ",
assigned_to,
#A(organisation,
# _href=org_url,
# _class="card-organisation",
# ),
_class="card-person",
),
_class="media",
),
_class="media-body",
),
_class="media",
),
#docs,
_class=item_class,
_id=item_id,
)
return item
# =============================================================================
class project_Details(S3Method):
"""
Custom profile page with multiple DataTables:
* Organisations
* Locations
* Beneficiaries
* Documents
* Staff
"""
def __init__(self, form=None):
"""
Constructor
@param form: widget config to inject at the top of the page,
or a callable to produce such a widget config
"""
self.form = form
# -------------------------------------------------------------------------
def apply_method(self, r, **attr):
"""
Entry point for REST API
@param r: the S3Request
@param attr: controller arguments
"""
if r.name == "project" and \
r.id and \
not r.component and \
r.representation in ("html", "aadata"):
T = current.T
s3db = current.s3db
settings = current.deployment_settings
def dt_row_actions(component):
return lambda r, list_id: [
{"label": T("Open"),
"url": r.url(component=component,
component_id="[id]",
method="update.popup",
vars={"refresh": list_id}),
"_class": "action-btn edit s3_modal",
},
{"label": T("Delete"),
"_ajaxurl": r.url(component=component,
component_id="[id]",
method="delete.json",
),
"_class": "action-btn delete-btn-ajax dt-ajax-delete",
},
]
profile_widgets = []
form = self.form
if form:
if callable(form):
form = form(r)
if form is not None:
profile_widgets.append(form)
if settings.get_project_multiple_organisations():
orgs_widget = dict(label = "Organizations",
label_create = "Add Organization",
type = "datatable",
actions = dt_row_actions("organisation"),
tablename = "project_organisation",
context = "project",
create_controller = "project",
create_function = "project",
create_component = "organisation",
pagesize = None, # all records
)
profile_widgets.append(orgs_widget)
if settings.get_project_community():
label = "Communities"
label_create = "Add Community"
else:
label = "Locations"
label_create = "Add Location"
locations_widget = dict(label = label,
label_create = label_create,
type = "datatable",
actions = dt_row_actions("location"),
tablename = "project_location",
context = "project",
create_controller = "project",
create_function = "project",
create_component = "location",
pagesize = None, # all records
)
profile_widgets.append(locations_widget)
if settings.get_project_mode_3w():
beneficiaries_widget = dict(label = "Beneficiaries",
label_create = "Add Beneficiaries",
type = "datatable",
actions = dt_row_actions("beneficiary"),
tablename = "project_beneficiary",
context = "project",
create_controller = "project",
create_function = "project",
create_component = "beneficiary",
pagesize = None, # all records
)
profile_widgets.append(beneficiaries_widget)
label = T("Documents")
else:
                    label = settings.get_ui_label_attachments()
docs_widget = dict(label = label,
label_create = "Add Document",
type = "datatable",
actions = dt_row_actions("document"),
tablename = "doc_document",
# @ToDo: Fix Filter
#context = "project",
context = ("~.doc_id", "doc_id"),
create_controller = "project",
create_function = "project",
create_component = "document",
pagesize = None, # all records
)
profile_widgets.append(docs_widget)
if settings.get_hrm_show_staff():
STAFF = settings.get_hrm_staff_label()
hr_widget = dict(label = STAFF,
label_create = "Add %(staff)s" % dict(staff=STAFF),
type = "datatable",
actions = dt_row_actions("human_resource"),
tablename = "hrm_human_resource",
context = "project",
create_controller = "project",
create_function = "project",
create_component = "human_resource",
pagesize = None, # all records
)
profile_widgets.append(hr_widget)
if r.representation == "html":
response = current.response
# Maintain normal rheader for consistency
profile_header = TAG[""](H2(response.s3.crud_strings["project_project"].title_display),
DIV(project_rheader(r), _id="rheader"),
)
else:
profile_header = None
tablename = r.tablename
s3db.configure(tablename,
profile_cols = 1,
profile_header = profile_header,
profile_widgets = profile_widgets,
)
profile = S3Profile()
profile.tablename = tablename
profile.request = r
output = profile.profile(r, **attr)
if r.representation == "html":
output["title"] = response.title = T("Details")
return output
else:
raise HTTP(501, current.ERROR.BAD_METHOD)
# END =========================================================================
| mit |
hoatle/odoo | addons/stock_dropshipping/tests/test_invoicing.py | 257 | 2284 | # Author: Leonardo Pistone
# Copyright 2015 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from openerp.tests.common import TransactionCase
class TestCreateInvoice(TransactionCase):
def setUp(self):
super(TestCreateInvoice, self).setUp()
self.Wizard = self.env['stock.invoice.onshipping']
self.customer = self.env.ref('base.res_partner_3')
product = self.env.ref('product.product_product_36')
dropship_route = self.env.ref('stock_dropshipping.route_drop_shipping')
self.so = self.env['sale.order'].create({
'partner_id': self.customer.id,
})
self.sol = self.env['sale.order.line'].create({
'name': '/',
'order_id': self.so.id,
'product_id': product.id,
'route_id': dropship_route.id,
})
def test_po_on_delivery_creates_correct_invoice(self):
self.so.action_button_confirm()
po = self.so.procurement_group_id.procurement_ids.purchase_id
self.assertTrue(po)
po.invoice_method = 'picking'
po.signal_workflow('purchase_confirm')
picking = po.picking_ids
self.assertEqual(1, len(picking))
picking.action_done()
wizard = self.Wizard.with_context({
'active_id': picking.id,
'active_ids': [picking.id],
}).create({})
invoice_ids = wizard.create_invoice()
invoices = self.env['account.invoice'].browse(invoice_ids)
self.assertEqual(1, len(invoices))
self.assertEqual(invoices.type, 'in_invoice')
self.assertEqual(invoices, po.invoice_ids)
| agpl-3.0 |
DESHONOR/android_kernel_huawei_g620s_Eloy | tools/perf/scripts/python/sctop.py | 11180 | 1924 | # system call top
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s sctop.py [comm] [interval]\n";
for_comm = None
default_interval = 3
interval = default_interval
if len(sys.argv) > 3:
sys.exit(usage)
if len(sys.argv) > 2:
for_comm = sys.argv[1]
interval = int(sys.argv[2])
elif len(sys.argv) > 1:
try:
interval = int(sys.argv[1])
except ValueError:
for_comm = sys.argv[1]
interval = default_interval
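# Illustrative invocations (assumed): "perf script -s sctop.py 5" refreshes every 5 seconds;
# "perf script -s sctop.py firefox 5" restricts the totals to syscalls made by "firefox".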
syscalls = autodict()
def trace_begin():
thread.start_new_thread(print_syscall_totals, (interval,))
pass
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals(interval):
while 1:
clear_term()
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
try:
print "%-40s %10d\n" % (syscall_name(id), val),
except TypeError:
pass
syscalls.clear()
time.sleep(interval)
| gpl-2.0 |
fener06/pyload | module/gui/PackageDock.py | 41 | 3198 | # -*- coding: utf-8 -*-
"""
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License,
or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, see <http://www.gnu.org/licenses/>.
@author: mkaay
"""
import re
from PyQt4.QtCore import *
from PyQt4.QtGui import *
class NewPackageDock(QDockWidget):
def __init__(self):
QDockWidget.__init__(self, _("New Package"))
self.setObjectName("New Package Dock")
self.widget = NewPackageWindow(self)
self.setWidget(self.widget)
self.setAllowedAreas(Qt.RightDockWidgetArea|Qt.LeftDockWidgetArea)
self.hide()
def slotDone(self):
text = str(self.widget.box.toPlainText())
pw = str(self.widget.passwordInput.text())
if not pw:
pw = None
lines = []
for line in text.splitlines():
line = line.strip()
if not line:
continue
lines.append(line)
self.emit(SIGNAL("done"), str(self.widget.nameInput.text()), lines, pw)
self.widget.nameInput.setText("")
self.widget.passwordInput.setText("")
self.widget.box.clear()
self.hide()
def parseUri(self):
text=str(self.widget.box.toPlainText())
self.widget.box.setText("")
result = re.findall(r"(?:ht|f)tps?:\/\/[a-zA-Z0-9\-\.\/\?=_&%#]+[<| |\"|\'|\r|\n|\t]{1}", text)
        for url in result:
            # the regex always captures one trailing delimiter character; strip it
            if url and url[-1] in "<| \"'\r\n\t":
                url = url[:-1]
            self.widget.box.append("%s " % url)
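# Illustrative behaviour (assumed input): pasting "see http://example.com/file.zip here" and clicking
# "Filter URLs" leaves only "http://example.com/file.zip " in the text box.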
class NewPackageWindow(QWidget):
def __init__(self, dock):
QWidget.__init__(self)
self.dock = dock
self.setLayout(QGridLayout())
layout = self.layout()
nameLabel = QLabel(_("Name"))
nameInput = QLineEdit()
passwordLabel = QLabel(_("Password"))
passwordInput = QLineEdit()
linksLabel = QLabel(_("Links in this Package"))
self.box = QTextEdit()
self.nameInput = nameInput
self.passwordInput = passwordInput
save = QPushButton(_("Create"))
parseUri = QPushButton(_("Filter URLs"))
layout.addWidget(nameLabel, 0, 0)
layout.addWidget(nameInput, 0, 1)
layout.addWidget(passwordLabel, 1, 0)
layout.addWidget(passwordInput, 1, 1)
layout.addWidget(linksLabel, 2, 0, 1, 2)
layout.addWidget(self.box, 3, 0, 1, 2)
layout.addWidget(parseUri, 4, 0, 1, 2)
layout.addWidget(save, 5, 0, 1, 2)
self.connect(save, SIGNAL("clicked()"), self.dock.slotDone)
self.connect(parseUri, SIGNAL("clicked()"), self.dock.parseUri) | gpl-3.0 |
veger/ansible | lib/ansible/modules/remote_management/imc/imc_rest.py | 27 | 14815 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Dag Wieers <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: imc_rest
short_description: Manage Cisco IMC hardware through its REST API
description:
- Provides direct access to the Cisco IMC REST API.
- Perform any configuration changes and actions that the Cisco IMC supports.
- More information about the IMC REST API is available from
U(http://www.cisco.com/c/en/us/td/docs/unified_computing/ucs/c/sw/api/3_0/b_Cisco_IMC_api_301.html)
author:
- Dag Wieers (@dagwieers)
version_added: '2.4'
requirements:
- lxml
- xmljson >= 0.1.8
options:
hostname:
description:
- IP Address or hostname of Cisco IMC, resolvable by Ansible control host.
required: true
aliases: [ host, ip ]
username:
description:
    - Username used to login to the IMC device.
default: admin
aliases: [ user ]
password:
description:
- The password to use for authentication.
default: password
path:
description:
- Name of the absolute path of the filename that includes the body
of the http request being sent to the Cisco IMC REST API.
- Parameter C(path) is mutual exclusive with parameter C(content).
aliases: [ 'src', 'config_file' ]
content:
description:
- When used instead of C(path), sets the content of the API requests directly.
- This may be convenient to template simple requests, for anything complex use the M(template) module.
- You can collate multiple IMC XML fragments and they will be processed sequentially in a single stream,
the Cisco IMC output is subsequently merged.
- Parameter C(content) is mutual exclusive with parameter C(path).
protocol:
description:
- Connection protocol to use.
default: https
choices: [ http, https ]
timeout:
description:
- The socket level timeout in seconds.
- This is the time that every single connection (every fragment) can spend.
If this C(timeout) is reached, the module will fail with a
C(Connection failure) indicating that C(The read operation timed out).
default: 60
validate_certs:
description:
- If C(no), SSL certificates will not be validated.
    - This should only be set to C(no) on personally controlled sites using self-signed certificates.
type: bool
default: 'yes'
notes:
- The XML fragments don't need an authentication cookie, this is injected by the module automatically.
- The Cisco IMC XML output is being translated to JSON using the Cobra convention.
- Any configConfMo change requested has a return status of 'modified', even if there was no actual change
from the previous configuration. As a result, this module will always report a change on subsequent runs.
In case this behaviour is fixed in a future update to Cisco IMC, this module will automatically adapt.
- If you get a C(Connection failure) related to C(The read operation timed out) increase the C(timeout)
parameter. Some XML fragments can take longer than the default timeout.
- More information about the IMC REST API is available from
U(http://www.cisco.com/c/en/us/td/docs/unified_computing/ucs/c/sw/api/3_0/b_Cisco_IMC_api_301.html)
'''
EXAMPLES = r'''
- name: Power down server
imc_rest:
hostname: '{{ imc_hostname }}'
username: '{{ imc_username }}'
password: '{{ imc_password }}'
validate_certs: no
content: |
<configConfMo><inConfig>
<computeRackUnit dn="sys/rack-unit-1" adminPower="down"/>
</inConfig></configConfMo>
delegate_to: localhost
- name: Configure IMC using multiple XML fragments
imc_rest:
hostname: '{{ imc_hostname }}'
username: '{{ imc_username }}'
password: '{{ imc_password }}'
validate_certs: no
timeout: 120
content: |
<!-- Configure Serial-on-LAN -->
<configConfMo><inConfig>
      <solIf dn="sys/rack-unit-1/sol-if" adminState="enable" speed="115200" comport="com0"/>
</inConfig></configConfMo>
<!-- Configure Console Redirection -->
<configConfMo><inConfig>
<biosVfConsoleRedirection dn="sys/rack-unit-1/bios/bios-settings/Console-redirection"
vpBaudRate="115200"
vpConsoleRedirection="com-0"
vpFlowControl="none"
vpTerminalType="vt100"
vpPuttyKeyPad="LINUX"
vpRedirectionAfterPOST="Always Enable"/>
</inConfig></configConfMo>
delegate_to: localhost
- name: Enable PXE boot and power-cycle server
imc_rest:
hostname: '{{ imc_hostname }}'
username: '{{ imc_username }}'
password: '{{ imc_password }}'
validate_certs: no
content: |
<!-- Configure PXE boot -->
<configConfMo><inConfig>
<lsbootLan dn="sys/rack-unit-1/boot-policy/lan-read-only" access="read-only" order="1" prot="pxe" type="lan"/>
</inConfig></configConfMo>
<!-- Power cycle server -->
<configConfMo><inConfig>
<computeRackUnit dn="sys/rack-unit-1" adminPower="cycle-immediate"/>
</inConfig></configConfMo>
delegate_to: localhost
- name: Reconfigure IMC to boot from storage
imc_rest:
hostname: '{{ imc_host }}'
username: '{{ imc_username }}'
password: '{{ imc_password }}'
validate_certs: no
content: |
<configConfMo><inConfig>
<lsbootStorage dn="sys/rack-unit-1/boot-policy/storage-read-write" access="read-write" order="1" type="storage"/>
</inConfig></configConfMo>
delegate_to: localhost
- name: Add customer description to server
imc_rest:
hostname: '{{ imc_host }}'
username: '{{ imc_username }}'
password: '{{ imc_password }}'
validate_certs: no
content: |
<configConfMo><inConfig>
<computeRackUnit dn="sys/rack-unit-1" usrLbl="Customer Lab - POD{{ pod_id }} - {{ inventory_hostname_short }}"/>
</inConfig></configConfMo>
delegate_to: localhost
- name: Disable HTTP and increase session timeout to max value 10800 secs
imc_rest:
hostname: '{{ imc_host }}'
username: '{{ imc_username }}'
password: '{{ imc_password }}'
validate_certs: no
timeout: 120
content: |
<configConfMo><inConfig>
<commHttp dn="sys/svc-ext/http-svc" adminState="disabled"/>
</inConfig></configConfMo>
<configConfMo><inConfig>
<commHttps dn="sys/svc-ext/https-svc" adminState="enabled" sessionTimeout="10800"/>
</inConfig></configConfMo>
delegate_to: localhost
'''
RETURN = r'''
aaLogin:
description: Cisco IMC XML output for the login, translated to JSON using Cobra convention
returned: success
type: dict
sample: |
"attributes": {
"cookie": "",
"outCookie": "1498902428/9de6dc36-417c-157c-106c-139efe2dc02a",
"outPriv": "admin",
"outRefreshPeriod": "600",
"outSessionId": "114",
"outVersion": "2.0(13e)",
"response": "yes"
}
configConfMo:
description: Cisco IMC XML output for any configConfMo XML fragments, translated to JSON using Cobra convention
returned: success
type: dict
sample: |
elapsed:
description: Elapsed time in seconds
returned: always
type: int
sample: 31
response:
description: HTTP response message, including content length
returned: always
type: string
sample: OK (729 bytes)
status:
description: The HTTP response status code
returned: always
  type: int
sample: 200
error:
description: Cisco IMC XML error output for last request, translated to JSON using Cobra convention
returned: failed
type: dict
sample: |
"attributes": {
"cookie": "",
"errorCode": "ERR-xml-parse-error",
"errorDescr": "XML PARSING ERROR: Element 'computeRackUnit', attribute 'admin_Power': The attribute 'admin_Power' is not allowed. ",
"invocationResult": "594",
"response": "yes"
}
error_code:
description: Cisco IMC error code
returned: failed
type: string
sample: ERR-xml-parse-error
error_text:
description: Cisco IMC error message
returned: failed
type: string
sample: |
XML PARSING ERROR: Element 'computeRackUnit', attribute 'admin_Power': The attribute 'admin_Power' is not allowed.
input:
description: RAW XML input sent to the Cisco IMC, causing the error
returned: failed
type: string
sample: |
<configConfMo><inConfig><computeRackUnit dn="sys/rack-unit-1" admin_Power="down"/></inConfig></configConfMo>
output:
  description: RAW XML output received from the Cisco IMC, with error details
returned: failed
type: string
sample: >
<error cookie=""
response="yes"
errorCode="ERR-xml-parse-error"
invocationResult="594"
errorDescr="XML PARSING ERROR: Element 'computeRackUnit', attribute 'admin_Power': The attribute 'admin_Power' is not allowed.\n"/>
'''
import atexit
import datetime
try:
    from itertools import zip_longest  # Python 3
except ImportError:  # Python 2
    from itertools import izip_longest as zip_longest
import os
try:
import lxml.etree
HAS_LXML_ETREE = True
except ImportError:
HAS_LXML_ETREE = False
try:
from xmljson import cobra
HAS_XMLJSON_COBRA = True
except ImportError:
HAS_XMLJSON_COBRA = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url
def imc_response(module, rawoutput, rawinput=''):
''' Handle IMC returned data '''
xmloutput = lxml.etree.fromstring(rawoutput)
result = cobra.data(xmloutput)
# Handle errors
if xmloutput.get('errorCode') and xmloutput.get('errorDescr'):
if rawinput:
result['input'] = rawinput
result['output'] = rawoutput
result['error_code'] = xmloutput.get('errorCode')
result['error_text'] = xmloutput.get('errorDescr')
module.fail_json(msg='Request failed: %(error_text)s' % result, **result)
return result
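# Illustrative behaviour (assumed reply): an IMC response such as <aaaLogin response="yes" outCookie="..."/>
# is translated by cobra.data() into {"aaaLogin": {"attributes": {...}}}, matching the RETURN samples above.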
def logout(module, url, cookie, timeout):
''' Perform a logout, if needed '''
data = '<aaaLogout cookie="%s" inCookie="%s"/>' % (cookie, cookie)
resp, auth = fetch_url(module, url, data=data, method="POST", timeout=timeout)
def merge(one, two):
''' Merge two complex nested datastructures into one'''
if isinstance(one, dict) and isinstance(two, dict):
copy = dict(one)
# copy.update({key: merge(one.get(key, None), two[key]) for key in two})
copy.update(dict((key, merge(one.get(key, None), two[key])) for key in two))
return copy
elif isinstance(one, list) and isinstance(two, list):
        return [merge(alpha, beta) for (alpha, beta) in zip_longest(one, two)]
return one if two is None else two
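# Illustrative usage (assumed values):
#   merge({'a': 1, 'b': {'x': 1}}, {'b': {'y': 2}, 'c': 3}) -> {'a': 1, 'b': {'x': 1, 'y': 2}, 'c': 3}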
def main():
module = AnsibleModule(
argument_spec=dict(
hostname=dict(type='str', required=True, aliases=['host', 'ip']),
username=dict(type='str', default='admin', aliases=['user']),
password=dict(type='str', default='password', no_log=True),
content=dict(type='str'),
path=dict(type='path', aliases=['config_file', 'src']),
protocol=dict(type='str', default='https', choices=['http', 'https']),
timeout=dict(type='int', default=60),
validate_certs=dict(type='bool', default=True),
),
supports_check_mode=True,
mutually_exclusive=[['content', 'path']],
)
if not HAS_LXML_ETREE:
module.fail_json(msg='module requires the lxml Python library installed on the managed host')
if not HAS_XMLJSON_COBRA:
module.fail_json(msg='module requires the xmljson (>= 0.1.8) Python library installed on the managed host')
hostname = module.params['hostname']
username = module.params['username']
password = module.params['password']
content = module.params['content']
path = module.params['path']
protocol = module.params['protocol']
timeout = module.params['timeout']
result = dict(
failed=False,
changed=False,
)
# Report missing file
file_exists = False
if path:
if os.path.isfile(path):
file_exists = True
else:
module.fail_json(msg='Cannot find/access path:\n%s' % path)
start = datetime.datetime.utcnow()
# Perform login first
url = '%s://%s/nuova' % (protocol, hostname)
data = '<aaaLogin inName="%s" inPassword="%s"/>' % (username, password)
resp, auth = fetch_url(module, url, data=data, method='POST', timeout=timeout)
if resp is None or auth['status'] != 200:
result['elapsed'] = (datetime.datetime.utcnow() - start).seconds
module.fail_json(msg='Task failed with error %(status)s: %(msg)s' % auth, **result)
result.update(imc_response(module, resp.read()))
# Store cookie for future requests
try:
cookie = result['aaaLogin']['attributes']['outCookie']
except:
module.fail_json(msg='Could not find cookie in output', **result)
# If we would not log out properly, we run out of sessions quickly
atexit.register(logout, module, url, cookie, timeout)
# Prepare request data
if content:
rawdata = content
elif file_exists:
with open(path, 'r') as config_object:
rawdata = config_object.read()
# Wrap the XML documents in a <root> element
xmldata = lxml.etree.fromstring('<root>%s</root>' % rawdata.replace('\n', ''))
# Handle each XML document separately in the same session
for xmldoc in list(xmldata):
if xmldoc.tag is lxml.etree.Comment:
continue
# Add cookie to XML
xmldoc.set('cookie', cookie)
data = lxml.etree.tostring(xmldoc)
# Perform actual request
resp, info = fetch_url(module, url, data=data, method='POST', timeout=timeout)
if resp is None or info['status'] != 200:
result['elapsed'] = (datetime.datetime.utcnow() - start).seconds
module.fail_json(msg='Task failed with error %(status)s: %(msg)s' % info, **result)
# Merge results with previous results
rawoutput = resp.read()
result = merge(result, imc_response(module, rawoutput, rawinput=data))
result['response'] = info['msg']
result['status'] = info['status']
# Check for any changes
# NOTE: Unfortunately IMC API always report status as 'modified'
xmloutput = lxml.etree.fromstring(rawoutput)
results = xmloutput.xpath('/configConfMo/outConfig/*/@status')
result['changed'] = ('modified' in results)
# Report success
result['elapsed'] = (datetime.datetime.utcnow() - start).seconds
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
liqi328/rjrepaircompany | django/core/exceptions.py | 292 | 2767 | """
Global Django exception and warning classes.
"""
class DjangoRuntimeWarning(RuntimeWarning):
pass
class ObjectDoesNotExist(Exception):
"The requested object does not exist"
silent_variable_failure = True
class MultipleObjectsReturned(Exception):
"The query returned multiple objects when only one was expected."
pass
class SuspiciousOperation(Exception):
"The user did something suspicious"
pass
class PermissionDenied(Exception):
"The user did not have permission to do that"
pass
class ViewDoesNotExist(Exception):
"The requested view does not exist"
pass
class MiddlewareNotUsed(Exception):
"This middleware is not used in this server configuration"
pass
class ImproperlyConfigured(Exception):
"Django is somehow improperly configured"
pass
class FieldError(Exception):
"""Some kind of problem with a model field."""
pass
NON_FIELD_ERRORS = '__all__'
class ValidationError(Exception):
"""An error while validating data."""
def __init__(self, message, code=None, params=None):
import operator
from django.utils.encoding import force_unicode
"""
ValidationError can be passed any object that can be printed (usually
a string), a list of objects or a dictionary.
"""
if isinstance(message, dict):
self.message_dict = message
# Reduce each list of messages into a single list.
message = reduce(operator.add, message.values())
if isinstance(message, list):
self.messages = [force_unicode(msg) for msg in message]
else:
self.code = code
self.params = params
message = force_unicode(message)
self.messages = [message]
def __str__(self):
# This is needed because, without a __str__(), printing an exception
# instance would result in this:
# AttributeError: ValidationError instance has no attribute 'args'
# See http://www.python.org/doc/current/tut/node10.html#handling
if hasattr(self, 'message_dict'):
return repr(self.message_dict)
return repr(self.messages)
def __repr__(self):
if hasattr(self, 'message_dict'):
return 'ValidationError(%s)' % repr(self.message_dict)
return 'ValidationError(%s)' % repr(self.messages)
def update_error_dict(self, error_dict):
if hasattr(self, 'message_dict'):
if error_dict:
for k, v in self.message_dict.items():
error_dict.setdefault(k, []).extend(v)
else:
error_dict = self.message_dict
else:
error_dict[NON_FIELD_ERRORS] = self.messages
return error_dict
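# Illustrative usage (assumed messages): ValidationError("Enter a valid value.") yields a single-item
# message list, while ValidationError({"name": ["This field is required."]}) stores per-field errors
# in message_dict.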
| bsd-3-clause |
popazerty/dvbapp2-gui | lib/python/Tools/ASCIItranslit.py | 84 | 4273 | # -*- coding:utf-8 -*-
ASCIItranslit = { \
0x0022: "''", \
0x002A: "_", \
0x002F: "_", \
0x003A: "_", \
0x003C: "_", \
0x003D: "_", \
0x003E: "_", \
0x003F: "_", \
0x005C: "_", \
0x007C: "_", \
0x007F: "", \
0x00A0: "_", \
0x00A1: "!", \
0x00A2: "c", \
0x00A3: "lb", \
0x00A4: "", \
0x00A5: "yen", \
0x00A6: "I", \
0x00A7: "SS", \
0x00A8: "'", \
0x00A9: "(c)", \
0x00AA: "a", \
0x00AB: "<<", \
0x00AC: "not", \
0x00AD: "-", \
0x00AE: "(R)", \
0x00AF: "", \
0x00B0: "^0", \
0x00B1: "+-", \
0x00B2: "^2", \
0x00B3: "^3", \
0x00B4: "'", \
0x00B5: "u", \
0x00B6: "P", \
0x00B7: ".", \
0x00B8: ",", \
0x00B9: "^1", \
0x00BA: "o", \
0x00BB: ">>", \
0x00BC: "1_4 ", \
0x00BD: "1_2 ", \
0x00BE: "3_4 ", \
0x00BF: "_", \
0x00C0: "`A", \
0x00C1: "'A", \
0x00C2: "^A", \
0x00C3: "~A", \
0x00C4: "Ae", \
0x00C5: "A", \
0x00C6: "AE", \
0x00C7: "C", \
0x00C8: "`E", \
0x00C9: "'E", \
0x00CA: "^E", \
0x00CB: "E", \
0x00CC: "`I", \
0x00CD: "'I", \
0x00CE: "^I", \
0x00CF: "I", \
0x00D0: "D", \
0x00D1: "~N", \
0x00D2: "`O", \
0x00D3: "'O", \
0x00D4: "^O", \
0x00D5: "~O", \
0x00D6: "Oe", \
0x00D7: "x", \
0x00D8: "O", \
0x00D9: "`U", \
0x00DA: "'U", \
0x00DB: "^U", \
0x00DC: "Ue", \
0x00DD: "'Y", \
0x00DE: "Th", \
0x00DF: "ss", \
0x00E0: "`a", \
0x00E1: "'a", \
0x00E2: "^a", \
0x00E3: "~a", \
0x00E4: "AE", \
0x00E5: "a", \
0x00E6: "ae", \
0x00E7: "c", \
0x00E8: "`e", \
0x00E9: "'e", \
0x00EA: "^e", \
0x00EB: "e", \
0x00EC: "`i", \
0x00ED: "'i", \
0x00EE: "^i", \
0x00EF: "i", \
0x00F0: "d", \
0x00F1: "~n", \
0x00F2: "`o", \
0x00F3: "'o", \
0x00F4: "^o", \
0x00F5: "~o", \
0x00F6: "oe", \
0x00F7: "_", \
0x00F8: "o", \
0x00F9: "`u", \
0x00FA: "'u", \
0x00FB: "^u", \
0x00FC: "ue", \
0x00FD: "'y", \
0x00FE: "th", \
0x00FF: "Y", \
0x0100: "A", \
0x0101: "a", \
0x0102: "A", \
0x0103: "a", \
0x0104: "A", \
0x0105: "a", \
0x0106: "'C", \
0x0107: "'c", \
0x0108: "^C", \
0x0109: "^c", \
0x010A: "C", \
0x010B: "c", \
0x010C: "C", \
0x010D: "c", \
0x010E: "D", \
0x010F: "d", \
0x0110: "D", \
0x0111: "d", \
0x0112: "E", \
0x0113: "e", \
0x0114: "E", \
0x0115: "e", \
0x0116: "E", \
0x0117: "e", \
0x0118: "E", \
0x0119: "e", \
0x011A: "E", \
0x011B: "e", \
0x011C: "^G", \
0x011D: "^g", \
0x011E: "G", \
0x011F: "g", \
0x0120: "G", \
0x0121: "g", \
0x0122: "G", \
0x0123: "g", \
0x0124: "^H", \
0x0125: "^h", \
0x0126: "H", \
0x0127: "h", \
0x0128: "~I", \
0x0129: "~i", \
0x012A: "I", \
0x012B: "i", \
0x012C: "I", \
0x012D: "i", \
0x012E: "I", \
0x012F: "i", \
0x0130: "I", \
0x0131: "i", \
0x0132: "IJ", \
0x0133: "ij", \
0x0134: "^J", \
0x0135: "^j", \
0x0136: "K", \
0x0137: "k", \
0x0138: "", \
0x0139: "L", \
0x013A: "l", \
0x013B: "L", \
0x013C: "l", \
0x013D: "L", \
0x013E: "l", \
0x013F: "L", \
0x0140: "l", \
0x0141: "L", \
0x0142: "l", \
0x0143: "'N", \
0x0144: "'n", \
0x0145: "N", \
0x0146: "n", \
0x0147: "N", \
0x0148: "n", \
0x0149: "n", \
0x014A: "_", \
0x014B: "_", \
0x014C: "O", \
0x014D: "o", \
0x014E: "O", \
0x014F: "o", \
0x0150: "''o", \
0x0152: "OE", \
0x0153: "oe", \
0x0154: "'R", \
0x0155: "'r", \
0x0156: "R", \
0x0157: "r", \
0x0158: "R", \
0x0159: "r", \
0x015A: "'s", \
0x015B: "'s", \
0x015C: "^S", \
0x015D: "^s", \
0x015E: "S", \
0x015F: "s", \
0x0160: "S", \
0x0161: "s", \
0x0162: "T", \
0x0163: "t", \
0x0164: "T", \
0x0165: "t", \
0x0166: "T", \
0x0167: "t", \
0x0168: "~U", \
0x0169: "~u", \
0x016A: "U", \
0x016B: "u", \
0x016C: "U", \
0x016D: "u", \
0x016E: "U", \
0x016F: "u", \
0x0170: "''u", \
0x0172: "U", \
0x0173: "u", \
0x0174: "^W", \
0x0175: "^w", \
0x0176: "^Y", \
0x0177: "^y", \
0x0178: "Y", \
0x0179: "'Z", \
0x017A: "'z", \
0x017B: "Z", \
0x017C: "z", \
0x017D: "Z", \
0x017E: "z", \
0x017F: "s", \
0x018F: "_", \
0x0192: "f", \
0x01C4: "DZ", \
0x01C5: "DZ", \
0x01C6: "DZ", \
0x01C7: "LJ", \
0x01C8: "Lj", \
0x01C9: "lj", \
0x01CA: "NJ", \
0x01CB: "Nj", \
0x01CC: "nj", \
0x01F1: "DZ", \
0x01F2: "Dz", \
0x01F3: "dz", \
0x0218: "S", \
0x0219: "s", \
0x021A: "T", \
0x021B: "t", \
0x0259: "_", \
0x20AC: "EUR" }
def legacyEncode(string):
string2 = ""
for z, char in enumerate(string.decode("utf-8")):
i = ord(char)
if i < 33:
string2 += "_"
elif i in ASCIItranslit:
string2 += ASCIItranslit[i]
else:
try:
string2 += char.encode('ascii', 'strict')
except:
string2 += "_"
return string2.upper()
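# Illustrative usage (assumed input): legacyEncode("Grüße") returns "GRUESSE"; characters below 0x21,
# or without a mapping and not encodable as ASCII, become "_".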
| gpl-2.0 |
commial/miasm | example/ida/symbol_exec.py | 3 | 5178 | from __future__ import print_function
import operator
from future.utils import viewitems
import idaapi
import idc
from miasm.expression.expression_helper import Variables_Identifier
from miasm.expression.expression import ExprAssign
from utils import expr2colorstr, translatorForm
class ActionHandler(idaapi.action_handler_t):
def activate(self, ctx):
view_index = get_focused_view()
if view_index is None:
return 1
self.custom_action(all_views[view_index])
return 1
def update(self, ctx):
return idaapi.AST_ENABLE_ALWAYS
class ActionHandlerExpand(ActionHandler):
def custom_action(self, view):
view.expand_expr()
class ActionHandlerTranslate(ActionHandler):
def custom_action(self, view):
view.translate_expr(view.GetLineNo())
class symbolicexec_t(idaapi.simplecustviewer_t):
def add(self, key, value):
self.AddLine("%s = %s" % (
expr2colorstr(
key,
loc_db=self.loc_db
),
expr2colorstr(
value,
loc_db=self.loc_db
)
))
def expand(self, linenum):
element = self.line2eq[linenum]
expanded = Variables_Identifier(element[1],
var_prefix="%s_v" % element[0])
self.line2eq = (
self.line2eq[0:linenum] +
list(viewitems(expanded.vars)) +
[(element[0], expanded.equation)] +
self.line2eq[linenum + 1:]
)
def print_lines(self):
self.ClearLines()
for element in self.line2eq:
self.add(*element)
self.Refresh()
def translate_expr(self, line_nb):
element = self.line2eq[line_nb]
expr = ExprAssign(*element)
form = translatorForm(expr)
form.Compile()
form.Execute()
def Create(self, equations, machine, loc_db, *args, **kwargs):
if not super(symbolicexec_t, self).Create(*args, **kwargs):
return False
self.machine = machine
self.loc_db = loc_db
self.line2eq = sorted(viewitems(equations), key=operator.itemgetter(0))
self.lines_expanded = set()
self.print_lines()
return True
def expand_expr(self):
self.expand(self.GetLineNo())
self.print_lines()
def OnPopupMenu(self, menu_id):
if menu_id == self.menu_expand:
self.expand(self.GetLineNo())
self.print_lines()
if menu_id == self.menu_translate:
self.translate_expr(self.GetLineNo())
return True
def OnKeydown(self, vkey, shift):
# ESCAPE
if vkey == 27:
self.Close()
return True
if vkey == ord('E'):
self.expand_expr()
if vkey == ord('T'):
self.translate_expr(self.GetLineNo())
return False
def get_focused_view():
for i, view in enumerate(all_views):
if view.IsFocused():
return i
return None
class Hooks(idaapi.UI_Hooks):
def finish_populating_tform_popup(self, form, popup):
idaapi.attach_action_to_popup(form, popup, 'my:expand', None)
idaapi.attach_action_to_popup(form, popup, 'my:translate', None)
def symbolic_exec():
from miasm.ir.symbexec import SymbolicExecutionEngine
from miasm.core.bin_stream_ida import bin_stream_ida
from utils import guess_machine
start, end = idc.SelStart(), idc.SelEnd()
bs = bin_stream_ida()
machine = guess_machine(addr=start)
mdis = machine.dis_engine(bs)
if start == idc.BADADDR and end == idc.BADADDR:
start = idc.ScreenEA()
end = idc.next_head(start) # Get next instruction address
mdis.dont_dis = [end]
asmcfg = mdis.dis_multiblock(start)
ira = machine.ira(loc_db=mdis.loc_db)
ircfg = ira.new_ircfg_from_asmcfg(asmcfg)
print("Run symbolic execution...")
sb = SymbolicExecutionEngine(ira, machine.mn.regs.regs_init)
sb.run_at(ircfg, start)
modified = {}
for dst, src in sb.modified(init_state=machine.mn.regs.regs_init):
modified[dst] = src
view = symbolicexec_t()
all_views.append(view)
if not view.Create(modified, machine, mdis.loc_db,
"Symbolic Execution - 0x%x to 0x%x"
% (start, idc.prev_head(end))):
return
view.Show()
# Support ida 6.9 and ida 7
all_views = []
hooks = Hooks()
hooks.hook()
action_expand = idaapi.action_desc_t(
'my:expand',
'Expand',
ActionHandlerExpand(),
'E',
'Expand expression',
50)
action_translate = idaapi.action_desc_t(
'my:translate',
'Translate',
ActionHandlerTranslate(),
'T',
'Translate expression in C/python/z3...',
103)
idaapi.register_action(action_expand)
idaapi.register_action(action_translate)
if __name__ == '__main__':
idaapi.CompileLine('static key_F3() { RunPythonStatement("symbolic_exec()"); }')
idc.AddHotkey("F3", "key_F3")
print("=" * 50)
print("""Available commands:
symbolic_exec() - F3: Symbolic execution of current selection
""")
| gpl-2.0 |
thica/ORCA-Remote | src/ORCA/widgets/Slider.py | 1 | 11336 | # -*- coding: utf-8 -*-
"""
ORCA Open Remote Control Application
Copyright (C) 2013-2020 Carsten Thielepape
Please contact me by : http://www.orca-remote.org/
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from typing import Union
from xml.etree.ElementTree import Element
from kivy.uix.widget import Widget
from ORCA.utils.Atlas import ToAtlas
from ORCA.utils.LogError import LogError
from ORCA.utils.TypeConvert import ToFloat
from ORCA.utils.TypeConvert import ToUnicode
from ORCA.utils.XML import GetXMLBoolAttributeVar
from ORCA.utils.XML import GetXMLIntAttribute
from ORCA.utils.XML import GetXMLTextAttribute
from ORCA.utils.XML import GetXMLTextAttributeVar
from ORCA.vars.Replace import ReplaceVars
from ORCA.vars.Helpers import Round
from ORCA.vars.Access import SetVar
from ORCA.vars.Access import GetVar
from ORCA.widgets.base.Base import cWidgetBase
from ORCA.widgets.base.BaseBase import cWidgetBaseBase
from ORCA.widgets.base.BaseText import cWidgetBaseText
from ORCA.widgets.base.BaseAction import cWidgetBaseAction
from ORCA.widgets.core.SliderEx import cSliderEx
from ORCA.utils.FileName import cFileName
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from ORCA.ScreenPage import cScreenPage
else:
from typing import TypeVar
cScreenPage = TypeVar("cScreenPage")
__all__ = ['cWidgetSlider']
class cWidgetSlider(cWidgetBase,cWidgetBaseText,cWidgetBaseAction,cWidgetBaseBase):
"""
WikiDoc:Doc
WikiDoc:Context:Widgets
WikiDoc:Page:Widgets-SLIDER
WikiDoc:TOCTitle:Slider
= Slider =
    The slider widget provides the function of a slider. If you move the slider by mouse or by touch, an action is triggered. You can add text to the slider knob as well.
    If you want to set the position of the slider (the button picture), you have to set [PREFIX]_value and then call the updatewidget action.
The following attributes are additional attributes to common widget and text attributes
<div style="overflow:auto; ">
{| class="wikitable"
! align="left" | Attribute
! align="left" | Description
|-
|type
|fixed: needs to be "SLIDER". Capital letters!
|-
|picturenormal
    |The background picture to show. This picture will be shrunk to 25% of its height to have a transparent background
|-
|picturebutton
    |The button picture to show. Could be either a round or a square picture
|-
|mindatavalue
    |This is the value assigned to the slider when it reaches the left boundary. Data values are float values, so you can use small numbers as well.
|-
|maxdatavalue
    |This is the value assigned to the slider when it reaches the right boundary.
|-
    |orientation
    |The orientation of the slider. Could be either
* horizontal
* vertical
|-
|destvar
    |This is the ''PREFIX'' for the variable which holds the status of the slider. The slider widget sets/updates the following variable when it gets moved:
* [PREFIX]_value: The value assigned to the slider position within the data range.
|-
|roundpos
    |The number of digits to which the [PREFIX]_value should be rounded. Examples: if the [PREFIX]_value is 10.623:
* "0" will round to 11
* "1" will round to 10.6
|-
|discardmoves
    |If you set this attribute to "1", you only get a notification when the user finishes moving the widget. If not set, you get (a lot of) notifications while the user is moving the widget on the screen. Can be useful if you would like to avoid an interface being swamped by commands.
|}</div>
Below you see an example for a slider
<div style="overflow-x: auto;"><syntaxhighlight lang="xml">
<element name="Amp Volume Center" type="SLIDER" posx="center" posy="middle" width="%70" height="%75" picturenormal="background boxes" picturebutton="button round normal" action="Set Center Volume By Widget" mindatavalue="-12" maxdatavalue="12" destvar="volume_center" roundpos="0" orientation="vertical" discardmoves="1" fontsize='%w50' caption='icon:volume_up'/>
</syntaxhighlight></div>
WikiDoc:End
"""
# noinspection PyUnusedLocal
def __init__(self,**kwargs):
super().__init__()
self.oFnPictureNormal:Union[cFileName,None] = None
self.oFnPictureButton:Union[cFileName,None] = None
self.uDestVar:str = u'slider'
self.uDeviceOrientation:str = u'horizontal'
self.bDiscardMoves:bool = True
self.fMin:float = 0.0
self.fMax:float = 100.0
self.uMin:str = u''
self.uMax:str = u''
self.fValue:float = 0.0
self.fOldValue:float = 10000.23445
self.fDataRange:float = 100.0
self.iRoundPos:int = 0
def InitWidgetFromXml(self,*,oXMLNode:Element,oParentScreenPage:cScreenPage, uAnchor:str) -> bool:
""" Reads further Widget attributes from a xml node """
bRet=self.ParseXMLBaseNode(oXMLNode,oParentScreenPage , uAnchor)
if bRet:
self.oFnPictureNormal = cFileName(u'').ImportFullPath(uFnFullName=GetXMLTextAttributeVar(oXMLNode=oXMLNode,uTag=u'picturenormal',bMandatory= False,uDefault=u''))
self.oFnPictureButton = cFileName(u'').ImportFullPath(uFnFullName=GetXMLTextAttributeVar(oXMLNode=oXMLNode,uTag=u'picturebutton',bMandatory= False,uDefault=u''))
self.uMin = GetXMLTextAttribute(oXMLNode=oXMLNode, uTag=u'mindatavalue', bMandatory=False, vDefault=u'0.0')
self.uMax = GetXMLTextAttribute(oXMLNode=oXMLNode, uTag=u'maxdatavalue', bMandatory=False, vDefault=u'100.0')
self.uDestVar = GetXMLTextAttribute(oXMLNode=oXMLNode, uTag=u'destvar', bMandatory=False, vDefault=self.uDestVar)
self.iRoundPos = GetXMLIntAttribute(oXMLNode=oXMLNode, uTag=u'roundpos', bMandatory=False, iDefault=0) #roundpos: the position, the number should be rounded
self.uDeviceOrientation = GetXMLTextAttribute(oXMLNode=oXMLNode, uTag=u'orientation', bMandatory=False, vDefault=self.uDeviceOrientation)
self.bDiscardMoves = GetXMLBoolAttributeVar(oXMLNode=oXMLNode, uTag=u'discardmoves', bMandatory=False, bDefault=False)
self.fValue = self.fMin
return bRet
def Create(self, oParent: Widget) -> bool:
""" creates the Widget """
try:
self.fMin = ToFloat(ReplaceVars(self.uMin))
self.fMax = ToFloat(ReplaceVars(self.uMax))
self.AddArg('min', self.fMin)
self.AddArg('max', self.fMax)
self.AddArg('orientation', self.uDeviceOrientation)
self.AddArg('value', self.fMin)
self.AddArg('background_pic', ToAtlas(oFileName=self.oFnPictureNormal))
self.AddArg('button_pic', ToAtlas(oFileName=self.oFnPictureButton))
if self.CreateBase(Parent=oParent, Class=cSliderEx):
self.fDataRange=abs(self.fMax-self.fMin)
self.oObject.bind(on_slider_moved=self.OnNotifyChange)
# Capability to click on Knobs as well (needs to be implemented)
if not self.uActionName==u'':
self.oObject.bind(on_release=self.On_Button_Up)
self.oObject.bind(on_press =self.On_Button_Down)
self.oParent.add_widget(self.oObject)
self.UpdateWidget()
return True
return False
except Exception as e:
LogError ( uMsg=u'cWidgetSlider:Unexpected error Creating Object:',oException=e)
return False
def OnNotifyChange(self,instance):
""" will be called, when the slider will be moved """
if self.bDiscardMoves and (instance.uMoveType == u'move'):
return
if not self.bIsEnabled:
return
if not self.uDestVar==u'':
if self.fMin<self.fMax:
self.fValue=Round(self.oObject.value,self.iRoundPos)
else:
self.fValue=Round(self.fMax-self.oObject.value,self.iRoundPos)
if self.iRoundPos==0:
self.fValue=int(self.fValue)
self.UpdateVars()
if not self.uActionName==u'':
if self.fOldValue!=self.fValue:
self.fOldValue=self.fValue
self.On_Button_Up(instance)
def UpdateWidget(self) -> None:
""" Updates the silder pos, based on the assigned Var """
uValue:str
fMax:float
fMin:float
super().UpdateWidget()
if not self.uDestVar==u'':
uValue=GetVar(uVarName = self.uDestVar)
fNewValue=ToFloat(uValue)
if GetVar(uVarName=self.uMax) != u'':
fMax = ToFloat(GetVar(uVarName=self.uMax))
else:
fMax = self.fMax
if GetVar(uVarName=self.uMin) != u'':
fMin = ToFloat(GetVar(uVarName=self.uMin))
else:
fMin = self.fMin
if fNewValue>fMax:
fNewValue=fMax
if fNewValue<fMin:
fNewValue=fMin
if self.oObject:
self.oObject.SetValue(fNewValue)
self.fValue=Round(fNewValue,self.iRoundPos)
self.UpdateVars()
def UpdateVars(self):
""" Updates the vars, if the slider has been moved """
if not self.uDestVar==u'':
SetVar(uVarName = self.uDestVar, oVarValue = ToUnicode(self.fValue))
def SetMax(self,fMax):
""" Set the upper limit """
self.fMax=fMax
self.oObject.max=fMax
self.UpdateWidget()
def SetMin(self,fMin):
""" Set the lower limit """
self.fMin=fMin
self.oObject.min=fMin
self.UpdateWidget()
| gpl-3.0 |
Elico-Corp/odoo_OCB | addons/sale_service/tests/test_sale_service.py | 41 | 2127 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from openerp.addons.sale.tests.test_sale_common import TestSale
class TestSaleService(TestSale):
def test_sale_service(self):
""" Test task creation when confirming a so with the corresponding product """
prod_task = self.env.ref('product.product_product_1')
so_vals = {
'partner_id': self.partner.id,
'partner_invoice_id': self.partner.id,
'partner_shipping_id': self.partner.id,
'order_line': [(0, 0, {'name': prod_task.name, 'product_id': prod_task.id, 'product_uom_qty': 50, 'product_uom': prod_task.uom_id.id, 'price_unit': prod_task.list_price})],
'pricelist_id': self.env.ref('product.list0').id,
}
so = self.env['sale.order'].create(so_vals)
so.action_confirm()
self.assertEqual(so.invoice_status, 'no', 'Sale Service: there should be nothing to invoice after validation')
# check task creation
project = self.env.ref('sale_service.project_GAP')
task = project.task_ids.filtered(lambda t: t.name == '%s:%s' % (so.name, prod_task.name))
self.assertTrue(task, 'Sale Service: task is not created')
self.assertEqual(task.partner_id, so.partner_id, 'Sale Service: customer should be the same on task and on SO')
# register timesheet on task
self.env['account.analytic.line'].create({
'name': 'Test Line',
'account_id': project.id,
'task_id': task.id,
'unit_amount': 50,
'user_id': self.manager.id,
'is_timesheet': True,
})
self.assertEqual(so.invoice_status, 'to invoice', 'Sale Service: there should be something to invoice after registering timesheets')
so.action_invoice_create()
line = so.order_line
self.assertTrue(line.product_uom_qty == line.qty_delivered == line.qty_invoiced, 'Sale Service: line should be invoiced completely')
self.assertEqual(so.invoice_status, 'invoiced', 'Sale Service: SO should be invoiced')
| agpl-3.0 |
tivek/conan | conans/server/service/service.py | 2 | 8746 | from conans.errors import RequestErrorException, NotFoundException, ForbiddenException
from conans.server.store.file_manager import FileManager
import os
import jwt
from conans.util.files import mkdir
from conans.model.ref import PackageReference
from conans.util.log import logger
class FileUploadDownloadService(object):
"""Handles authorization from token and upload and download files"""
def __init__(self, updown_auth_manager, base_store_folder):
self.updown_auth_manager = updown_auth_manager
self.base_store_folder = base_store_folder
def get_file_path(self, filepath, token):
try:
encoded_path, _, user = self.updown_auth_manager.get_resource_info(token)
if not self._valid_path(filepath, encoded_path):
logger.info("Invalid path file!! %s: %s" % (user, filepath))
raise NotFoundException("File not found")
logger.debug("Get file: user=%s path=%s" % (user, filepath))
file_path = os.path.normpath(os.path.join(self.base_store_folder, encoded_path))
return file_path
except (jwt.ExpiredSignature, jwt.DecodeError, AttributeError):
raise NotFoundException("File not found")
def put_file(self, file_saver, abs_filepath, token, upload_size):
"""
file_saver is an object with the save() method without parameters
"""
try:
encoded_path, filesize, user = self.updown_auth_manager.get_resource_info(token)
# Check size
if upload_size != filesize:
logger.debug("Invalid size file!!: %s: %s" % (user, abs_filepath))
raise RequestErrorException("Bad file size")
abs_encoded_path = os.path.abspath(os.path.join(self.base_store_folder, encoded_path))
if not self._valid_path(abs_filepath, abs_encoded_path):
raise NotFoundException("File not found")
logger.debug("Put file: %s: %s" % (user, abs_filepath))
mkdir(os.path.dirname(abs_filepath))
if os.path.exists(abs_filepath):
os.remove(abs_filepath)
file_saver.save(os.path.dirname(abs_filepath))
except (jwt.ExpiredSignature, jwt.DecodeError, AttributeError):
raise NotFoundException("File not found")
def _valid_path(self, filepath, encoded_path):
if encoded_path == filepath:
path = os.path.join(self.base_store_folder, encoded_path)
path = os.path.normpath(path)
# Protect from path outside storage "../.."
if not path.startswith(self.base_store_folder):
return False
return True
else:
return False
class SearchService(object):
def __init__(self, authorizer, search_manager, auth_user):
self._authorizer = authorizer
self._search_manager = search_manager
self._auth_user = auth_user
def search_packages(self, reference, query):
self._authorizer.check_read_conan(self._auth_user, reference)
info = self._search_manager.search_packages(reference, query)
return info
def search(self, pattern=None, ignorecase=True):
""" Get all the info about any package
Attributes:
pattern = wildcards like opencv/*
"""
references = self._search_manager.search(pattern, ignorecase)
filtered = []
# Filter out restricted items
for conan_ref in references:
try:
self._authorizer.check_read_conan(self._auth_user, conan_ref)
filtered.append(conan_ref)
except ForbiddenException:
pass
return filtered
class ConanService(object):
"""Handles authorization and expose methods for REST API"""
def __init__(self, authorizer, file_manager, auth_user):
assert(isinstance(file_manager, FileManager))
self._authorizer = authorizer
self._file_manager = file_manager
self._auth_user = auth_user
def get_conanfile_snapshot(self, reference):
"""Gets a dict with filepaths and the md5:
{filename: md5}
"""
self._authorizer.check_read_conan(self._auth_user, reference)
snap = self._file_manager.get_conanfile_snapshot(reference)
if not snap:
raise NotFoundException("conanfile not found")
return snap
def get_conanfile_download_urls(self, reference, files_subset=None):
"""Gets a dict with filepaths and the urls:
{filename: url}
"""
self._authorizer.check_read_conan(self._auth_user, reference)
urls = self._file_manager.get_download_conanfile_urls(reference,
files_subset,
self._auth_user)
if not urls:
raise NotFoundException("conanfile not found")
return urls
def get_conanfile_upload_urls(self, reference, filesizes):
_validate_conan_reg_filenames(list(filesizes.keys()))
self._authorizer.check_write_conan(self._auth_user, reference)
urls = self._file_manager.get_upload_conanfile_urls(reference,
filesizes,
self._auth_user)
return urls
def remove_conanfile(self, reference):
self._authorizer.check_delete_conan(self._auth_user, reference)
self._file_manager.remove_conanfile(reference)
def remove_packages(self, reference, package_ids_filter):
for package_id in package_ids_filter:
ref = PackageReference(reference, package_id)
self._authorizer.check_delete_package(self._auth_user, ref)
if not package_ids_filter: # Remove all packages, check that we can remove conanfile
self._authorizer.check_delete_conan(self._auth_user, reference)
self._file_manager.remove_packages(reference, package_ids_filter)
def remove_conanfile_files(self, reference, files):
self._authorizer.check_delete_conan(self._auth_user, reference)
self._file_manager.remove_conanfile_files(reference, files)
def remove_package_files(self, package_reference, files):
self._authorizer.check_delete_package(self._auth_user, package_reference)
self._file_manager.remove_package_files(package_reference, files)
# Package methods
def get_package_snapshot(self, package_reference):
"""Gets a list with filepaths and the urls and md5:
[filename: {'url': url, 'md5': md5}]
"""
self._authorizer.check_read_package(self._auth_user, package_reference)
snap = self._file_manager.get_package_snapshot(package_reference)
return snap
def get_package_download_urls(self, package_reference, files_subset=None):
"""Gets a list with filepaths and the urls and md5:
[filename: {'url': url, 'md5': md5}]
"""
self._authorizer.check_read_package(self._auth_user, package_reference)
urls = self._file_manager.get_download_package_urls(package_reference,
files_subset=files_subset)
return urls
def get_package_upload_urls(self, package_reference, filesizes):
"""
:param package_reference: PackageReference
:param filesizes: {filepath: bytes}
:return {filepath: url} """
try:
self._file_manager.get_conanfile_snapshot(package_reference.conan)
except NotFoundException:
raise NotFoundException("There are no remote conanfiles like %s"
% str(package_reference.conan))
self._authorizer.check_write_package(self._auth_user, package_reference)
urls = self._file_manager.get_upload_package_urls(package_reference,
filesizes, self._auth_user)
return urls
def _validate_conan_reg_filenames(files):
message = "Invalid conans request"
# Could be partial uploads, so we can't expect for all files to be present
# # conanfile and digest in files
# if CONANFILE not in files:
# # Log something
# raise RequestErrorException("Missing %s" % CONANFILE)
# if CONAN_MANIFEST not in files:
# # Log something
# raise RequestErrorException("Missing %s" % CONAN_MANIFEST)
# All contents in same directory (from conan_id)
for filename in files:
if ".." in filename:
# Log something
raise RequestErrorException(message)
| mit |
chhao91/QGIS | python/plugins/db_manager/dlg_sql_window.py | 5 | 12633 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
Name : DB Manager
Description : Database manager plugin for QGIS
Date : May 23, 2011
copyright : (C) 2011 by Giuseppe Sucameli
email : [email protected]
The content of this file is based on
- PG_Manager by Martin Dobias (GPLv2 license)
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from PyQt4.QtCore import Qt, QObject, QSettings, QByteArray, SIGNAL, pyqtSignal
from PyQt4.QtGui import QDialog, QWidget, QAction, QKeySequence, \
QDialogButtonBox, QApplication, QCursor, QMessageBox, QClipboard, QInputDialog, QIcon
from PyQt4.Qsci import QsciAPIs
from qgis.core import QgsProject
from .db_plugins.plugin import BaseError
from .dlg_db_error import DlgDbError
from .dlg_query_builder import QueryBuilderDlg
try:
from qgis.gui import QgsCodeEditorSQL
except:
from .sqledit import SqlEdit
from qgis import gui
gui.QgsCodeEditorSQL = SqlEdit
from .ui.ui_DlgSqlWindow import Ui_DbManagerDlgSqlWindow as Ui_Dialog
import re
class DlgSqlWindow(QWidget, Ui_Dialog):
nameChanged = pyqtSignal(str)
def __init__(self, iface, db, parent=None):
QWidget.__init__(self, parent)
self.iface = iface
self.db = db
self.setupUi(self)
self.setWindowTitle(
u"%s - %s [%s]" % (self.windowTitle(), db.connection().connectionName(), db.connection().typeNameString()))
self.defaultLayerName = 'QueryLayer'
self.editSql.setFocus()
self.editSql.setVerticalScrollBarPolicy(Qt.ScrollBarAsNeeded)
self.initCompleter()
# allow to copy results
copyAction = QAction("copy", self)
self.viewResult.addAction(copyAction)
copyAction.setShortcuts(QKeySequence.Copy)
copyAction.triggered.connect(self.copySelectedResults)
self.btnExecute.clicked.connect(self.executeSql)
self.btnClear.clicked.connect(self.clearSql)
self.presetStore.clicked.connect(self.storePreset)
self.presetDelete.clicked.connect(self.deletePreset)
self.presetCombo.activated[str].connect(self.loadPreset)
self.presetCombo.activated[str].connect(self.presetName.setText)
self.updatePresetsCombobox()
# hide the load query as layer if feature is not supported
self._loadAsLayerAvailable = self.db.connector.hasCustomQuerySupport()
self.loadAsLayerGroup.setVisible(self._loadAsLayerAvailable)
if self._loadAsLayerAvailable:
self.layerTypeWidget.hide() # show if load as raster is supported
self.loadLayerBtn.clicked.connect(self.loadSqlLayer)
self.getColumnsBtn.clicked.connect(self.fillColumnCombos)
self.loadAsLayerGroup.toggled.connect(self.loadAsLayerToggled)
self.loadAsLayerToggled(False)
self._createViewAvailable = self.db.connector.hasCreateSpatialViewSupport()
self.btnCreateView.setVisible(self._createViewAvailable)
if self._createViewAvailable:
self.btnCreateView.clicked.connect(self.createView)
self.queryBuilderFirst = True
self.queryBuilderBtn.setIcon(QIcon(":/db_manager/icons/sql.gif"))
self.queryBuilderBtn.clicked.connect(self.displayQueryBuilder)
self.presetName.textChanged.connect(self.nameChanged)
def updatePresetsCombobox(self):
self.presetCombo.clear()
names = []
entries = QgsProject.instance().subkeyList('DBManager', 'savedQueries')
for entry in entries:
name = QgsProject.instance().readEntry('DBManager', 'savedQueries/' + entry + '/name')[0]
names.append(name)
for name in sorted(names):
self.presetCombo.addItem(name)
self.presetCombo.setCurrentIndex(-1)
def storePreset(self):
query = self._getSqlQuery()
if query == "":
return
name = self.presetName.text()
QgsProject.instance().writeEntry('DBManager', 'savedQueries/q' + unicode(name.__hash__()) + '/name', name)
QgsProject.instance().writeEntry('DBManager', 'savedQueries/q' + unicode(name.__hash__()) + '/query', query)
index = self.presetCombo.findText(name)
if index == -1:
self.presetCombo.addItem(name)
self.presetCombo.setCurrentIndex(self.presetCombo.count() - 1)
else:
self.presetCombo.setCurrentIndex(index)
def deletePreset(self):
name = self.presetCombo.currentText()
QgsProject.instance().removeEntry('DBManager', 'savedQueries/q' + unicode(name.__hash__()))
self.presetCombo.removeItem(self.presetCombo.findText(name))
self.presetCombo.setCurrentIndex(-1)
def loadPreset(self, name):
query = QgsProject.instance().readEntry('DBManager', 'savedQueries/q' + unicode(name.__hash__()) + '/query')[0]
name = QgsProject.instance().readEntry('DBManager', 'savedQueries/q' + unicode(name.__hash__()) + '/name')[0]
self.editSql.setText(query)
def loadAsLayerToggled(self, checked):
self.loadAsLayerGroup.setChecked(checked)
self.loadAsLayerWidget.setVisible(checked)
def clearSql(self):
self.editSql.clear()
self.editSql.setFocus()
def executeSql(self):
sql = self._getSqlQuery()
if sql == "":
return
QApplication.setOverrideCursor(QCursor(Qt.WaitCursor))
# delete the old model
old_model = self.viewResult.model()
self.viewResult.setModel(None)
if old_model:
old_model.deleteLater()
self.uniqueCombo.clear()
self.geomCombo.clear()
try:
# set the new model
model = self.db.sqlResultModel(sql, self)
self.viewResult.setModel(model)
self.lblResult.setText(self.tr("%d rows, %.1f seconds") % (model.affectedRows(), model.secs()))
except BaseError as e:
QApplication.restoreOverrideCursor()
DlgDbError.showError(e, self)
return
cols = sorted(self.viewResult.model().columnNames())
self.uniqueCombo.addItems(cols)
self.geomCombo.addItems(cols)
self.update()
QApplication.restoreOverrideCursor()
def loadSqlLayer(self):
hasUniqueField = self.uniqueColumnCheck.checkState() == Qt.Checked
if hasUniqueField:
uniqueFieldName = self.uniqueCombo.currentText()
else:
uniqueFieldName = None
hasGeomCol = self.hasGeometryCol.checkState() == Qt.Checked
if hasGeomCol:
geomFieldName = self.geomCombo.currentText()
else:
geomFieldName = None
query = self._getSqlQuery()
if query == "":
return
# remove a trailing ';' from query if present
if query.strip().endswith(';'):
query = query.strip()[:-1]
QApplication.setOverrideCursor(QCursor(Qt.WaitCursor))
from qgis.core import QgsMapLayer, QgsMapLayerRegistry
layerType = QgsMapLayer.VectorLayer if self.vectorRadio.isChecked() else QgsMapLayer.RasterLayer
# get a new layer name
names = []
for layer in QgsMapLayerRegistry.instance().mapLayers().values():
names.append(layer.name())
layerName = self.layerNameEdit.text()
if layerName == "":
layerName = self.defaultLayerName
newLayerName = layerName
index = 1
while newLayerName in names:
index += 1
newLayerName = u"%s_%d" % (layerName, index)
# create the layer
layer = self.db.toSqlLayer(query, geomFieldName, uniqueFieldName, newLayerName, layerType,
self.avoidSelectById.isChecked())
if layer.isValid():
QgsMapLayerRegistry.instance().addMapLayers([layer], True)
QApplication.restoreOverrideCursor()
def fillColumnCombos(self):
query = self._getSqlQuery()
if query == "":
return
QApplication.setOverrideCursor(QCursor(Qt.WaitCursor))
self.uniqueCombo.clear()
self.geomCombo.clear()
# get a new alias
aliasIndex = 0
while True:
alias = "_%s__%d" % ("subQuery", aliasIndex)
escaped = re.compile('\\b("?)' + re.escape(alias) + '\\1\\b')
if not escaped.search(query):
break
aliasIndex += 1
# remove a trailing ';' from query if present
if query.strip().endswith(';'):
query = query.strip()[:-1]
# get all the columns
cols = []
connector = self.db.connector
sql = u"SELECT * FROM (%s\n) AS %s LIMIT 0" % (unicode(query), connector.quoteId(alias))
c = None
try:
c = connector._execute(None, sql)
cols = connector._get_cursor_columns(c)
except BaseError as e:
QApplication.restoreOverrideCursor()
DlgDbError.showError(e, self)
return
finally:
if c:
c.close()
del c
# get sensible default columns. do this before sorting in case there's hints in the column order (eg, id is more likely to be first)
try:
defaultGeomCol = next(col for col in cols if col in ['geom', 'geometry', 'the_geom', 'way'])
except:
defaultGeomCol = None
try:
defaultUniqueCol = [col for col in cols if 'id' in col][0]
except:
defaultUniqueCol = None
cols.sort()
self.uniqueCombo.addItems(cols)
self.geomCombo.addItems(cols)
# set sensible default columns
try:
self.geomCombo.setCurrentIndex(cols.index(defaultGeomCol))
except:
pass
try:
self.uniqueCombo.setCurrentIndex(cols.index(defaultUniqueCol))
except:
pass
QApplication.restoreOverrideCursor()
def copySelectedResults(self):
if len(self.viewResult.selectedIndexes()) <= 0:
return
model = self.viewResult.model()
# convert to string using tab as separator
text = model.headerToString("\t")
for idx in self.viewResult.selectionModel().selectedRows():
text += "\n" + model.rowToString(idx.row(), "\t")
QApplication.clipboard().setText(text, QClipboard.Selection)
QApplication.clipboard().setText(text, QClipboard.Clipboard)
def initCompleter(self):
dictionary = None
if self.db:
dictionary = self.db.connector.getSqlDictionary()
if not dictionary:
# use the generic sql dictionary
from .sql_dictionary import getSqlDictionary
dictionary = getSqlDictionary()
wordlist = []
for name, value in dictionary.iteritems():
wordlist += value # concat lists
wordlist = list(set(wordlist)) # remove duplicates
api = QsciAPIs(self.editSql.lexer())
for word in wordlist:
api.add(word)
api.prepare()
self.editSql.lexer().setAPIs(api)
def displayQueryBuilder(self):
dlg = QueryBuilderDlg(self.iface, self.db, self, reset=self.queryBuilderFirst)
self.queryBuilderFirst = False
r = dlg.exec_()
if r == QDialog.Accepted:
self.editSql.setText(dlg.query)
def createView(self):
name, ok = QInputDialog.getText(None, "View name", "View name")
if ok:
try:
self.db.connector.createSpatialView(name, self._getSqlQuery())
except BaseError as e:
DlgDbError.showError(e, self)
def _getSqlQuery(self):
sql = self.editSql.selectedText()
if len(sql) == 0:
sql = self.editSql.text()
return sql
| gpl-2.0 |
stephentyrone/swift | utils/gyb_syntax_support/GenericNodes.py | 13 | 3268 | from .Child import Child
from .Node import Node # noqa: I201
GENERIC_NODES = [
# generic-where-clause -> 'where' requirement-list
Node('GenericWhereClause', kind='Syntax',
children=[
Child('WhereKeyword', kind='WhereToken'),
Child('RequirementList', kind='GenericRequirementList',
collection_element_name='Requirement'),
]),
Node('GenericRequirementList', kind='SyntaxCollection',
element='GenericRequirement',
element_name='GenericRequirement'),
# generic-requirement ->
    # (same-type-requirement|conformance-requirement) ','?
Node('GenericRequirement', kind='Syntax',
traits=['WithTrailingComma'],
children=[
Child('Body', kind='Syntax',
node_choices=[
Child('SameTypeRequirement',
kind='SameTypeRequirement'),
Child('ConformanceRequirement',
kind='ConformanceRequirement'),
]),
Child('TrailingComma', kind='CommaToken',
is_optional=True),
]),
# same-type-requirement -> type-identifier == type
Node('SameTypeRequirement', kind='Syntax',
children=[
Child('LeftTypeIdentifier', kind='Type'),
Child('EqualityToken', kind='Token',
token_choices=[
'SpacedBinaryOperatorToken',
'UnspacedBinaryOperatorToken',
'PrefixOperatorToken',
'PostfixOperatorToken',
]),
Child('RightTypeIdentifier', kind='Type'),
]),
Node('GenericParameterList', kind='SyntaxCollection',
element='GenericParameter'),
# generic-parameter -> type-name
# | type-name : type-identifier
# | type-name : protocol-composition-type
Node('GenericParameter', kind='Syntax',
traits=['WithTrailingComma'],
children=[
Child('Attributes', kind='AttributeList',
collection_element_name='Attribute', is_optional=True),
Child('Name', kind='IdentifierToken'),
Child('Colon', kind='ColonToken',
is_optional=True),
Child('InheritedType', kind='Type',
is_optional=True),
Child('TrailingComma', kind='CommaToken',
is_optional=True),
]),
# generic-parameter-clause -> '<' generic-parameter-list '>'
Node('GenericParameterClause', kind='Syntax',
children=[
Child('LeftAngleBracket', kind='LeftAngleToken'),
Child('GenericParameterList', kind='GenericParameterList',
collection_element_name='GenericParameter'),
Child('RightAngleBracket', kind='RightAngleToken'),
]),
# conformance-requirement -> type-identifier : type-identifier
Node('ConformanceRequirement', kind='Syntax',
children=[
Child('LeftTypeIdentifier', kind='Type'),
Child('Colon', kind='ColonToken'),
Child('RightTypeIdentifier', kind='Type'),
]),
]
| apache-2.0 |
manevant/django-oscar | src/oscar/apps/dashboard/partners/views.py | 4 | 10537 | from django.contrib import messages
from django.contrib.auth.models import Permission
from django.core.urlresolvers import reverse_lazy, reverse
from django.shortcuts import get_object_or_404, redirect
from django.utils.translation import ugettext_lazy as _
from django.template.loader import render_to_string
from django.views import generic
from oscar.apps.customer.utils import normalise_email
from oscar.core.loading import get_classes, get_model
from oscar.core.compat import get_user_model
from oscar.views import sort_queryset
User = get_user_model()
Partner = get_model('partner', 'Partner')
(
PartnerSearchForm, PartnerCreateForm, PartnerAddressForm,
NewUserForm, UserEmailForm, ExistingUserForm
) = get_classes(
'dashboard.partners.forms',
['PartnerSearchForm', 'PartnerCreateForm', 'PartnerAddressForm',
'NewUserForm', 'UserEmailForm', 'ExistingUserForm'])
class PartnerListView(generic.ListView):
model = Partner
context_object_name = 'partners'
template_name = 'dashboard/partners/partner_list.html'
form_class = PartnerSearchForm
def get_queryset(self):
qs = self.model._default_manager.all()
qs = sort_queryset(qs, self.request, ['name'])
self.description = _("All partners")
# We track whether the queryset is filtered to determine whether we
# show the search form 'reset' button.
self.is_filtered = False
self.form = self.form_class(self.request.GET)
if not self.form.is_valid():
return qs
data = self.form.cleaned_data
if data['name']:
qs = qs.filter(name__icontains=data['name'])
self.description = _("Partners matching '%s'") % data['name']
self.is_filtered = True
return qs
def get_context_data(self, **kwargs):
ctx = super(PartnerListView, self).get_context_data(**kwargs)
ctx['queryset_description'] = self.description
ctx['form'] = self.form
ctx['is_filtered'] = self.is_filtered
return ctx
class PartnerCreateView(generic.CreateView):
model = Partner
template_name = 'dashboard/partners/partner_form.html'
form_class = PartnerCreateForm
success_url = reverse_lazy('dashboard:partner-list')
def get_context_data(self, **kwargs):
ctx = super(PartnerCreateView, self).get_context_data(**kwargs)
ctx['title'] = _('Create new partner')
return ctx
def get_success_url(self):
messages.success(self.request,
_("Partner '%s' was created successfully.") %
self.object.name)
return reverse('dashboard:partner-list')
class PartnerManageView(generic.UpdateView):
"""
This multi-purpose view renders out a form to edit the partner's details,
the associated address and a list of all associated users.
"""
template_name = 'dashboard/partners/partner_manage.html'
form_class = PartnerAddressForm
success_url = reverse_lazy('dashboard:partner-list')
def get_object(self, queryset=None):
self.partner = get_object_or_404(Partner, pk=self.kwargs['pk'])
address = self.partner.primary_address
if address is None:
address = self.partner.addresses.model(partner=self.partner)
return address
def get_initial(self):
return {'name': self.partner.name}
def get_context_data(self, **kwargs):
ctx = super(PartnerManageView, self).get_context_data(**kwargs)
ctx['partner'] = self.partner
ctx['title'] = self.partner.name
ctx['users'] = self.partner.users.all()
return ctx
def form_valid(self, form):
messages.success(
self.request, _("Partner '%s' was updated successfully.") %
self.partner.name)
self.partner.name = form.cleaned_data['name']
self.partner.save()
return super(PartnerManageView, self).form_valid(form)
class PartnerDeleteView(generic.DeleteView):
model = Partner
template_name = 'dashboard/partners/partner_delete.html'
def get_success_url(self):
messages.success(self.request,
_("Partner '%s' was deleted successfully.") %
self.object.name)
return reverse('dashboard:partner-list')
# =============
# Partner users
# =============
class PartnerUserCreateView(generic.CreateView):
model = User
template_name = 'dashboard/partners/partner_user_form.html'
form_class = NewUserForm
def dispatch(self, request, *args, **kwargs):
self.partner = get_object_or_404(
Partner, pk=kwargs.get('partner_pk', None))
return super(PartnerUserCreateView, self).dispatch(
request, *args, **kwargs)
def get_context_data(self, **kwargs):
ctx = super(PartnerUserCreateView, self).get_context_data(**kwargs)
ctx['partner'] = self.partner
ctx['title'] = _('Create user')
return ctx
def get_form_kwargs(self):
kwargs = super(PartnerUserCreateView, self).get_form_kwargs()
kwargs['partner'] = self.partner
return kwargs
def get_success_url(self):
name = self.object.get_full_name() or self.object.email
messages.success(self.request,
_("User '%s' was created successfully.") % name)
return reverse('dashboard:partner-list')
class PartnerUserSelectView(generic.ListView):
template_name = 'dashboard/partners/partner_user_select.html'
form_class = UserEmailForm
context_object_name = 'users'
def dispatch(self, request, *args, **kwargs):
self.partner = get_object_or_404(
Partner, pk=kwargs.get('partner_pk', None))
return super(PartnerUserSelectView, self).dispatch(
request, *args, **kwargs)
def get(self, request, *args, **kwargs):
data = None
if 'email' in request.GET:
data = request.GET
self.form = self.form_class(data)
return super(PartnerUserSelectView, self).get(request, *args, **kwargs)
def get_context_data(self, **kwargs):
ctx = super(PartnerUserSelectView, self).get_context_data(**kwargs)
ctx['partner'] = self.partner
ctx['form'] = self.form
return ctx
def get_queryset(self):
if self.form.is_valid():
email = normalise_email(self.form.cleaned_data['email'])
return User.objects.filter(email__icontains=email)
else:
return User.objects.none()
class PartnerUserLinkView(generic.View):
def get(self, request, user_pk, partner_pk):
# need to allow GET to make Undo link in PartnerUserUnlinkView work
return self.post(request, user_pk, partner_pk)
def post(self, request, user_pk, partner_pk):
user = get_object_or_404(User, pk=user_pk)
name = user.get_full_name() or user.email
partner = get_object_or_404(Partner, pk=partner_pk)
if self.link_user(user, partner):
messages.success(
request,
_("User '%(name)s' was linked to '%(partner_name)s'")
% {'name': name, 'partner_name': partner.name})
else:
messages.info(
request,
_("User '%(name)s' is already linked to '%(partner_name)s'")
% {'name': name, 'partner_name': partner.name})
return redirect('dashboard:partner-manage', pk=partner_pk)
def link_user(self, user, partner):
"""
Links a user to a partner, and adds the dashboard permission if needed.
Returns False if the user was linked already; True otherwise.
"""
if partner.users.filter(pk=user.pk).exists():
return False
partner.users.add(user)
if not user.is_staff:
dashboard_access_perm = Permission.objects.get(
codename='dashboard_access',
content_type__app_label='partner')
user.user_permissions.add(dashboard_access_perm)
return True
class PartnerUserUnlinkView(generic.View):
def unlink_user(self, user, partner):
"""
Unlinks a user from a partner, and removes the dashboard permission
if they are not linked to any other partners.
Returns False if the user was not linked to the partner; True
otherwise.
"""
if not partner.users.filter(pk=user.pk).exists():
return False
partner.users.remove(user)
if not user.is_staff and not user.partners.exists():
user.user_permissions.filter(
codename='dashboard_access',
content_type__app_label='partner').delete()
return True
def post(self, request, user_pk, partner_pk):
user = get_object_or_404(User, pk=user_pk)
name = user.get_full_name() or user.email
partner = get_object_or_404(Partner, pk=partner_pk)
if self.unlink_user(user, partner):
msg = render_to_string(
'dashboard/partners/messages/user_unlinked.html',
{'user_name': name,
'partner_name': partner.name,
'user_pk': user_pk,
'partner_pk': partner_pk})
messages.success(self.request, msg, extra_tags='safe noicon')
else:
messages.error(
request,
_("User '%(name)s' is not linked to '%(partner_name)s'") %
{'name': name, 'partner_name': partner.name})
return redirect('dashboard:partner-manage', pk=partner_pk)
# =====
# Users
# =====
class PartnerUserUpdateView(generic.UpdateView):
template_name = 'dashboard/partners/partner_user_form.html'
form_class = ExistingUserForm
def get_object(self, queryset=None):
return get_object_or_404(User,
pk=self.kwargs['user_pk'],
partners__pk=self.kwargs['partner_pk'])
def get_context_data(self, **kwargs):
ctx = super(PartnerUserUpdateView, self).get_context_data(**kwargs)
name = self.object.get_full_name() or self.object.email
ctx['title'] = _("Edit user '%s'") % name
return ctx
def get_success_url(self):
name = self.object.get_full_name() or self.object.email
messages.success(self.request,
_("User '%s' was updated successfully.") % name)
return reverse('dashboard:partner-list')
| bsd-3-clause |
gomex/hntool | lib/hntool/util.py | 1 | 1481 | #
# hntool - utility functions
# Copyright (C) 2009 Hugo Doria <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
#
import os
import sys
import re
# Functions
def is_root():
'''Method to check if hntool is running as root.'''
    if os.getuid() == 0:
        return True
    return False
def is_unix():
    '''Method to check if we are running on a POSIX (Unix-like) system.'''
if os.name == 'posix':
return True
return False
def term_len():
return int(os.popen('stty size', 'r').read().split()[1])
def split_len(seq, length):
result = []
    p = re.compile(r"(.{," + str(length) + r"})\s")
while len(seq) > 0:
if len(seq) < length:
result.append(seq)
break
else:
tmp,seq = (p.split(seq,1))[1:]
result.append(tmp)
return result
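# Illustrative usage sketch (not part of the original module): wrap a long
# message to the current terminal width. Assumes a POSIX terminal where
# `stty size` works, since term_len() shells out to it:
#
#   width = term_len()
#   for line in split_len("a long hardening check description ...", width):
#       print(line)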
| gpl-2.0 |
ciex/motor | lib/werkzeug/contrib/wrappers.py | 92 | 10254 | # -*- coding: utf-8 -*-
"""
werkzeug.contrib.wrappers
~~~~~~~~~~~~~~~~~~~~~~~~~
Extra wrappers or mixins contributed by the community. These wrappers can
be mixed in into request objects to add extra functionality.
Example::
from werkzeug.wrappers import Request as RequestBase
from werkzeug.contrib.wrappers import JSONRequestMixin
class Request(RequestBase, JSONRequestMixin):
pass
Afterwards this request object provides the extra functionality of the
:class:`JSONRequestMixin`.
:copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import codecs
from werkzeug.exceptions import BadRequest
from werkzeug.utils import cached_property
from werkzeug.http import dump_options_header, parse_options_header
from werkzeug._internal import _decode_unicode
try:
from simplejson import loads
except ImportError:
from json import loads
def is_known_charset(charset):
"""Checks if the given charset is known to Python."""
try:
codecs.lookup(charset)
except LookupError:
return False
return True
class JSONRequestMixin(object):
"""Add json method to a request object. This will parse the input data
through simplejson if possible.
:exc:`~werkzeug.exceptions.BadRequest` will be raised if the content-type
is not json or if the data itself cannot be parsed as json.
"""
@cached_property
def json(self):
"""Get the result of simplejson.loads if possible."""
if 'json' not in self.environ.get('CONTENT_TYPE', ''):
raise BadRequest('Not a JSON request')
try:
return loads(self.data)
except Exception:
raise BadRequest('Unable to read JSON request')
class ProtobufRequestMixin(object):
"""Add protobuf parsing method to a request object. This will parse the
input data through `protobuf`_ if possible.
:exc:`~werkzeug.exceptions.BadRequest` will be raised if the content-type
is not protobuf or if the data itself cannot be parsed property.
.. _protobuf: http://code.google.com/p/protobuf/
"""
#: by default the :class:`ProtobufRequestMixin` will raise a
#: :exc:`~werkzeug.exceptions.BadRequest` if the object is not
#: initialized. You can bypass that check by setting this
#: attribute to `False`.
protobuf_check_initialization = True
def parse_protobuf(self, proto_type):
"""Parse the data into an instance of proto_type."""
if 'protobuf' not in self.environ.get('CONTENT_TYPE', ''):
raise BadRequest('Not a Protobuf request')
obj = proto_type()
try:
obj.ParseFromString(self.data)
except Exception:
raise BadRequest("Unable to parse Protobuf request")
# Fail if not all required fields are set
if self.protobuf_check_initialization and not obj.IsInitialized():
raise BadRequest("Partial Protobuf request")
return obj
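# Illustrative usage sketch (not part of Werkzeug itself). `RequestBase` and
# `MyMessage` are placeholders: the former for werkzeug.wrappers.Request, the
# latter for a class generated by the protobuf compiler:
#
#   class Request(ProtobufRequestMixin, RequestBase):
#       pass
#
#   msg = request.parse_protobuf(MyMessage)  # raises BadRequest if not protobuf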
class RoutingArgsRequestMixin(object):
"""This request mixin adds support for the wsgiorg routing args
`specification`_.
.. _specification: http://www.wsgi.org/wsgi/Specifications/routing_args
"""
def _get_routing_args(self):
        return self.environ.get('wsgiorg.routing_args', ((),))[0]
def _set_routing_args(self, value):
if self.shallow:
raise RuntimeError('A shallow request tried to modify the WSGI '
'environment. If you really want to do that, '
'set `shallow` to False.')
self.environ['wsgiorg.routing_args'] = (value, self.routing_vars)
routing_args = property(_get_routing_args, _set_routing_args, doc='''
The positional URL arguments as `tuple`.''')
del _get_routing_args, _set_routing_args
def _get_routing_vars(self):
rv = self.environ.get('wsgiorg.routing_args')
if rv is not None:
return rv[1]
rv = {}
if not self.shallow:
self.routing_vars = rv
return rv
def _set_routing_vars(self, value):
if self.shallow:
raise RuntimeError('A shallow request tried to modify the WSGI '
'environment. If you really want to do that, '
'set `shallow` to False.')
self.environ['wsgiorg.routing_args'] = (self.routing_args, value)
routing_vars = property(_get_routing_vars, _set_routing_vars, doc='''
The keyword URL arguments as `dict`.''')
del _get_routing_vars, _set_routing_vars
class ReverseSlashBehaviorRequestMixin(object):
"""This mixin reverses the trailing slash behavior of :attr:`script_root`
and :attr:`path`. This makes it possible to use :func:`~urlparse.urljoin`
directly on the paths.
    Because it changes the behavior of :class:`Request` this class has to be
mixed in *before* the actual request class::
class MyRequest(ReverseSlashBehaviorRequestMixin, Request):
pass
This example shows the differences (for an application mounted on
`/application` and the request going to `/application/foo/bar`):
+---------------+-------------------+---------------------+
| | normal behavior | reverse behavior |
+===============+===================+=====================+
| `script_root` | ``/application`` | ``/application/`` |
+---------------+-------------------+---------------------+
| `path` | ``/foo/bar`` | ``foo/bar`` |
+---------------+-------------------+---------------------+
"""
@cached_property
def path(self):
"""Requested path as unicode. This works a bit like the regular path
info in the WSGI environment but will not include a leading slash.
"""
path = (self.environ.get('PATH_INFO') or '').lstrip('/')
return _decode_unicode(path, self.charset, self.encoding_errors)
@cached_property
def script_root(self):
"""The root path of the script includling a trailing slash."""
path = (self.environ.get('SCRIPT_NAME') or '').rstrip('/') + '/'
return _decode_unicode(path, self.charset, self.encoding_errors)
class DynamicCharsetRequestMixin(object):
""""If this mixin is mixed into a request class it will provide
a dynamic `charset` attribute. This means that if the charset is
transmitted in the content type headers it's used from there.
    Because it changes the behavior of :class:`Request` this class has
to be mixed in *before* the actual request class::
class MyRequest(DynamicCharsetRequestMixin, Request):
pass
By default the request object assumes that the URL charset is the
same as the data charset. If the charset varies on each request
based on the transmitted data it's not a good idea to let the URLs
change based on that. Most browsers assume either utf-8 or latin1
    for the URLs if they have trouble figuring it out. It's strongly
recommended to set the URL charset to utf-8::
class MyRequest(DynamicCharsetRequestMixin, Request):
url_charset = 'utf-8'
.. versionadded:: 0.6
"""
#: the default charset that is assumed if the content type header
#: is missing or does not contain a charset parameter. The default
#: is latin1 which is what HTTP specifies as default charset.
#: You may however want to set this to utf-8 to better support
#: browsers that do not transmit a charset for incoming data.
default_charset = 'latin1'
def unknown_charset(self, charset):
"""Called if a charset was provided but is not supported by
        the Python codecs module. By default latin1 is assumed, so as not
        to lose any information; you may override this method to
change the behavior.
:param charset: the charset that was not found.
:return: the replacement charset.
"""
return 'latin1'
@cached_property
def charset(self):
"""The charset from the content type."""
header = self.environ.get('CONTENT_TYPE')
if header:
ct, options = parse_options_header(header)
charset = options.get('charset')
if charset:
if is_known_charset(charset):
return charset
return self.unknown_charset(charset)
return self.default_charset
class DynamicCharsetResponseMixin(object):
"""If this mixin is mixed into a response class it will provide
    a dynamic `charset` attribute. This means that the charset is
looked up and stored in the `Content-Type` header and updates
itself automatically. This also means a small performance hit but
can be useful if you're working with different charsets on
responses.
    Because the charset attribute is not a property at class-level, the
default value is stored in `default_charset`.
    Because it changes the behavior of :class:`Response` this class has
to be mixed in *before* the actual response class::
class MyResponse(DynamicCharsetResponseMixin, Response):
pass
.. versionadded:: 0.6
"""
#: the default charset.
default_charset = 'utf-8'
def _get_charset(self):
header = self.headers.get('content-type')
if header:
charset = parse_options_header(header)[1].get('charset')
if charset:
return charset
return self.default_charset
def _set_charset(self, charset):
header = self.headers.get('content-type')
ct, options = parse_options_header(header)
if not ct:
raise TypeError('Cannot set charset if Content-Type '
'header is missing.')
options['charset'] = charset
self.headers['Content-Type'] = dump_options_header(ct, options)
charset = property(_get_charset, _set_charset, doc="""
The charset for the response. It's stored inside the
Content-Type header as a parameter.""")
del _get_charset, _set_charset
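# Illustrative usage sketch (not part of Werkzeug itself), mirroring the
# docstring above; the class name and charset values are assumptions made for
# the example only:
#
#   from werkzeug.wrappers import Response as ResponseBase
#
#   class MyResponse(DynamicCharsetResponseMixin, ResponseBase):
#       default_charset = 'utf-8'
#
#   resp = MyResponse('hello', content_type='text/plain; charset=latin1')
#   assert resp.charset == 'latin1'
#   resp.charset = 'utf-8'  # rewrites the charset parameter of Content-Type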
| apache-2.0 |
RoadRunnr/net-next | tools/perf/scripts/python/syscall-counts-by-pid.py | 1996 | 2105 | # system call counts, by pid
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts-by-pid.py [comm]\n";
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, id, args):
if (for_comm and common_comm != for_comm) or \
(for_pid and common_pid != for_pid ):
return
try:
syscalls[common_comm][common_pid][id] += 1
except TypeError:
syscalls[common_comm][common_pid][id] = 1
def syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
raw_syscalls__sys_enter(**locals())
def print_syscall_totals():
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events by comm/pid:\n\n",
print "%-40s %10s\n" % ("comm [pid]/syscalls", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id, val in sorted(syscalls[comm][pid].iteritems(), \
key = lambda(k, v): (v, k), reverse = True):
print " %-38s %10d\n" % (syscall_name(id), val),
| gpl-2.0 |
dizballanze/breaking-point | breaking_point/breaking_point.py | 1 | 1494 | import timeit
def find_breaking_point(f1, f2, input_generator, start=1, step=1,
limit=1000000, trial_count=1000, repeat_count=3):
"""
Find size of input arguments (n0) for which f2(n0) is faster than f1(n0).
- f1, f2 - functions to test.
    - input_generator - function that receives the current input size and returns input data as a tuple whose first item is a list of positional arguments and whose second item is a dict of keyword arguments.
- start - initial input data size.
- step - iteration step.
- limit - maximum size of input data.
- trial_count - count of executions of f1/f2 on each iteration.
    - repeat_count - number of times the trials are repeated; the average performance value is used.
returns n0 - size of input data for which f2(n0) is faster than f1(n0)
or None if reaches limit.
"""
    for n in range(start, limit + 1, step):
curr_input = input_generator(n)
# Test first function
f1_results = timeit.repeat(lambda: f1(*curr_input[0], **curr_input[1]),
repeat=repeat_count, number=trial_count)
f1_avg = sum(f1_results) / len(f1_results)
# Test second function
f2_results = timeit.repeat(lambda: f2(*curr_input[0], **curr_input[1]), repeat=repeat_count, number=trial_count)
f2_avg = sum(f2_results) / len(f2_results)
# Compare performance
if f2_avg < f1_avg:
return n
return None
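# Illustrative sketch (not part of the original module): find the input size
# at which binary search starts beating a linear scan over a sorted list.
# The two search functions and the generator below are made up for this
# example; only find_breaking_point() comes from this module.
if __name__ == '__main__':
    import bisect

    def linear_search(sorted_items, target):
        for i, value in enumerate(sorted_items):    # O(n) scan
            if value == target:
                return i
        return -1

    def binary_search(sorted_items, target):
        i = bisect.bisect_left(sorted_items, target)    # O(log n)
        if i < len(sorted_items) and sorted_items[i] == target:
            return i
        return -1

    def make_input(n):
        # ([positional args], {keyword args}), as input_generator must return
        return ([list(range(n)), n - 1], {})

    print(find_breaking_point(linear_search, binary_search, make_input,
                              start=1, step=10, limit=2000,
                              trial_count=200, repeat_count=3))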
| mit |
Ban3/Limnoria | plugins/AutoMode/config.py | 6 | 4477 | ###
# Copyright (c) 2005, Jeremiah Fincher
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import supybot.conf as conf
import supybot.registry as registry
from supybot.i18n import PluginInternationalization, internationalizeDocstring
_ = PluginInternationalization('AutoMode')
def configure(advanced):
# This will be called by supybot to configure this module. advanced is
# a bool that specifies whether the user identified themself as an advanced
# user or not. You should effect your configuration by manipulating the
# registry as appropriate.
from supybot.questions import expect, anything, something, yn
conf.registerPlugin('AutoMode', True)
AutoMode = conf.registerPlugin('AutoMode')
conf.registerChannelValue(AutoMode, 'enable',
registry.Boolean(True, _("""Determines whether this plugin is enabled.
""")))
conf.registerGlobalValue(AutoMode, 'owner',
registry.Boolean(False, _("""Determines whether this plugin will automode
owners even if they don't have op/halfop/voice/whatever capability.""")))
conf.registerChannelValue(AutoMode, 'alternativeCapabilities',
registry.Boolean(True, _("""Determines whether the bot will
check for 'alternative capabilities' (ie. autoop, autohalfop,
autovoice) in addition to/instead of classic ones.""")))
conf.registerChannelValue(AutoMode, 'fallthrough',
registry.Boolean(True, _("""Determines whether the bot will "fall
through" to halfop/voicing when auto-opping is turned off but
auto-halfopping/voicing are turned on.""")))
conf.registerChannelValue(AutoMode, 'op',
registry.Boolean(False, _("""Determines whether the bot will automatically
op people with the <channel>,op capability when they join the channel.
""")))
conf.registerChannelValue(AutoMode, 'halfop',
registry.Boolean(False, _("""Determines whether the bot will automatically
halfop people with the <channel>,halfop capability when they join the
channel.""")))
conf.registerChannelValue(AutoMode, 'voice',
registry.Boolean(False, _("""Determines whether the bot will automatically
voice people with the <channel>,voice capability when they join the
channel.""")))
conf.registerChannelValue(AutoMode, 'ban',
registry.Boolean(True, _("""Determines whether the bot will automatically
ban people who join the channel and are on the banlist.""")))
conf.registerChannelValue(AutoMode.ban, 'period',
registry.PositiveInteger(86400, _("""Determines how many seconds the bot
will automatically ban a person when banning.""")))
conf.registerChannelValue(AutoMode, 'delay',
registry.Integer(0, _("""Determines how many seconds the bot will wait
before applying a mode. Has no effect on bans.""")))
conf.registerChannelValue(AutoMode, 'extra',
registry.SpaceSeparatedListOfStrings([], _("""Extra modes that will be
applied to a user. Example syntax: user1+o-v user2+v user3-v""")))
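# Usage note (editorial, not part of the original plugin): the channel values
# registered above are normally read at runtime from the plugin class with the
# standard supybot accessor, e.g. self.registryValue('op', channel) for the
# per-channel boolean; shown here only as a sketch of how the settings are consumed.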
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| bsd-3-clause |
nrc/servo | tests/wpt/css-tests/tools/html5lib/html5lib/treewalkers/dom.py | 1229 | 1457 | from __future__ import absolute_import, division, unicode_literals
from xml.dom import Node
import gettext
_ = gettext.gettext
from . import _base
class TreeWalker(_base.NonRecursiveTreeWalker):
def getNodeDetails(self, node):
if node.nodeType == Node.DOCUMENT_TYPE_NODE:
return _base.DOCTYPE, node.name, node.publicId, node.systemId
elif node.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE):
return _base.TEXT, node.nodeValue
elif node.nodeType == Node.ELEMENT_NODE:
attrs = {}
for attr in list(node.attributes.keys()):
attr = node.getAttributeNode(attr)
if attr.namespaceURI:
attrs[(attr.namespaceURI, attr.localName)] = attr.value
else:
attrs[(None, attr.name)] = attr.value
return (_base.ELEMENT, node.namespaceURI, node.nodeName,
attrs, node.hasChildNodes())
elif node.nodeType == Node.COMMENT_NODE:
return _base.COMMENT, node.nodeValue
elif node.nodeType in (Node.DOCUMENT_NODE, Node.DOCUMENT_FRAGMENT_NODE):
return (_base.DOCUMENT,)
else:
return _base.UNKNOWN, node.nodeType
def getFirstChild(self, node):
return node.firstChild
def getNextSibling(self, node):
return node.nextSibling
def getParentNode(self, node):
return node.parentNode
| mpl-2.0 |
noroutine/ansible | lib/ansible/utils/module_docs_fragments/validate.py | 366 | 1146 | # Copyright (c) 2015 Ansible, Inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
# Standard documentation fragment
DOCUMENTATION = '''
options:
validate:
required: false
description:
- The validation command to run before copying into place. The path to the file to
validate is passed in via '%s' which must be present as in the example below.
The command is passed securely so shell features like expansion and pipes won't work.
default: None
'''
| gpl-3.0 |
cornell-brg/pydgin | riscv/isa_RV32D.py | 2 | 7317 | #=======================================================================
# isa_RV32D.py
#=======================================================================
'RISC-V instructions for the double-precision floating point extension.'
from utils import sext_xlen, sext_32, sext, signed, trim, fp_neg
from pydgin.utils import trim_32, r_ulonglong
from helpers import *
import softfloat as sfp
#=======================================================================
# Instruction Encodings
#=======================================================================
encodings = [
['fld', 'xxxxxxxxxxxxxxxxx011xxxxx0000111'],
['fsd', 'xxxxxxxxxxxxxxxxx011xxxxx0100111'],
['fmadd_d', 'xxxxx01xxxxxxxxxxxxxxxxxx1000011'],
['fmsub_d', 'xxxxx01xxxxxxxxxxxxxxxxxx1000111'],
['fnmsub_d', 'xxxxx01xxxxxxxxxxxxxxxxxx1001011'],
['fnmadd_d', 'xxxxx01xxxxxxxxxxxxxxxxxx1001111'],
['fadd_d', '0000001xxxxxxxxxxxxxxxxxx1010011'],
['fsub_d', '0000101xxxxxxxxxxxxxxxxxx1010011'],
['fmul_d', '0001001xxxxxxxxxxxxxxxxxx1010011'],
['fdiv_d', '0001101xxxxxxxxxxxxxxxxxx1010011'],
['fsqrt_d', '010110100000xxxxxxxxxxxxx1010011'],
['fsgnj_d', '0010001xxxxxxxxxx000xxxxx1010011'],
['fsgnjn_d', '0010001xxxxxxxxxx001xxxxx1010011'],
['fsgnjx_d', '0010001xxxxxxxxxx010xxxxx1010011'],
['fmin_d', '0010101xxxxxxxxxx000xxxxx1010011'],
['fmax_d', '0010101xxxxxxxxxx001xxxxx1010011'],
['fcvt_s_d', '010000000001xxxxxxxxxxxxx1010011'],
['fcvt_d_s', '010000100000xxxxxxxxxxxxx1010011'],
['feq_d', '1010001xxxxxxxxxx010xxxxx1010011'],
['flt_d', '1010001xxxxxxxxxx001xxxxx1010011'],
['fle_d', '1010001xxxxxxxxxx000xxxxx1010011'],
['fclass_d', '111000100000xxxxx001xxxxx1010011'],
['fcvt_w_d', '110000100000xxxxxxxxxxxxx1010011'],
['fcvt_wu_d', '110000100001xxxxxxxxxxxxx1010011'],
['fcvt_d_w', '110100100000xxxxxxxxxxxxx1010011'],
['fcvt_d_wu', '110100100001xxxxxxxxxxxxx1010011'],
]
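# Reading note (editorial, not from the original source): each pattern above is the
# 32-bit instruction written MSB-to-LSB with 'x' as don't-care bits. The rightmost
# seven characters are the opcode -- every entry ending in '1010011' is an OP-FP
# instruction -- and for fadd_d the leading '0000001' is the funct7 field that
# selects double-precision addition.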
#=======================================================================
# Instruction Definitions
#=======================================================================
def execute_fld( s, inst ):
# TODO: make memory support 64-bit ops
addr = trim_64( s.rf[inst.rs1] + inst.i_imm )
s.fp[inst.rd] = ( s.mem.read( addr+4, 4 ) << 32 ) \
| s.mem.read( addr, 4 )
s.pc += 4
def execute_fsd( s, inst ):
addr = trim_64( s.rf[inst.rs1] + inst.s_imm )
s.mem.write( addr, 4, trim_32( s.fp[inst.rs2] ) )
s.mem.write( addr+4, 4, trim_32( s.fp[inst.rs2] >> 32 ) )
s.pc += 4
def execute_fmadd_d( s, inst ):
a, b, c = s.fp[inst.rs1], s.fp[inst.rs2], s.fp[inst.rs3]
s.fp[ inst.rd ] = sfp.f64_mulAdd( a, b, c )
s.fcsr = sfp.get_flags()
sfp.set_flags( 0 )
s.pc += 4
def execute_fmsub_d( s, inst ):
a, b, c = s.fp[inst.rs1], s.fp[inst.rs2], s.fp[inst.rs3]
s.fp[ inst.rd ] = sfp.f64_mulAdd( a, b, fp_neg(c,64) )
s.fcsr = sfp.get_flags()
sfp.set_flags( 0 )
s.pc += 4
def execute_fnmsub_d( s, inst ):
a, b, c = s.fp[inst.rs1], s.fp[inst.rs2], s.fp[inst.rs3]
s.fp[ inst.rd ] = sfp.f64_mulAdd( fp_neg(a,64), b, c )
s.fcsr = sfp.get_flags()
sfp.set_flags( 0 )
s.pc += 4
def execute_fnmadd_d( s, inst ):
a, b, c = s.fp[inst.rs1], s.fp[inst.rs2], s.fp[inst.rs3]
s.fp[ inst.rd ] = sfp.f64_mulAdd( fp_neg(a,64), b, fp_neg(c,64) )
s.fcsr = sfp.get_flags()
sfp.set_flags( 0 )
s.pc += 4
def execute_fadd_d( s, inst ):
a, b = s.fp[inst.rs1], s.fp[inst.rs2]
s.fp[ inst.rd ] = sfp.f64_add( a, b )
s.fcsr = sfp.get_flags()
sfp.set_flags( 0 )
s.pc += 4
def execute_fsub_d( s, inst ):
a, b = s.fp[inst.rs1], s.fp[inst.rs2]
s.fp[ inst.rd ] = sfp.f64_sub( a, b )
s.fcsr = sfp.get_flags()
sfp.set_flags( 0 )
s.pc += 4
def execute_fmul_d( s, inst ):
a, b = s.fp[inst.rs1], s.fp[inst.rs2]
s.fp[ inst.rd ] = sfp.f64_mul( a, b )
s.fcsr = sfp.get_flags()
sfp.set_flags( 0 )
s.pc += 4
def execute_fdiv_d( s, inst ):
a, b = s.fp[inst.rs1], s.fp[inst.rs2]
s.fp[ inst.rd ] = sfp.f64_div( a, b )
s.fcsr = sfp.get_flags()
sfp.set_flags( 0 )
s.pc += 4
def execute_fsqrt_d( s, inst ):
a = s.fp[inst.rs1]
s.fp[ inst.rd ] = sfp.f64_sqrt( a )
s.fcsr = sfp.get_flags()
sfp.set_flags( 0 )
s.pc += 4
def execute_fsgnj_d( s, inst ):
sign_mask = r_ulonglong( 1 << 63 )
body_mask = sign_mask - 1
a, b = s.fp[inst.rs1], s.fp[inst.rs2]
s.fp[inst.rd] = (b & sign_mask) | (a & body_mask)
s.pc += 4
def execute_fsgnjn_d( s, inst ):
sign_mask = r_ulonglong( 1 << 63 )
body_mask = sign_mask - 1
a, b = s.fp[inst.rs1], s.fp[inst.rs2]
s.fp[inst.rd] = (~b & sign_mask) | (a & body_mask)
s.pc += 4
def execute_fsgnjx_d( s, inst ):
sign_mask = r_ulonglong( 1 << 63 )
body_mask = sign_mask - 1
a, b = s.fp[inst.rs1], s.fp[inst.rs2]
s.fp[inst.rd] = (b & sign_mask) ^ a
s.pc += 4
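# Editorial sketch (not in the original file): the three sign-injection ops above
# split a 64-bit value into sign bit and magnitude using
#   sign_mask = 0x8000000000000000 and body_mask = 0x7fffffffffffffff,
# so fsgnj.d keeps rs1's magnitude with rs2's sign, fsgnjn.d uses the inverted
# sign of rs2, and fsgnjx.d XORs rs2's sign bit into rs1.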
def execute_fmin_d( s, inst ):
a, b = s.fp[inst.rs1], s.fp[inst.rs2]
# TODO: s.fp[ inst.rd ] = sfp.isNaNF64UI(b) || ...
s.fp[ inst.rd ] = a if sfp.f64_lt_quiet(a,b) else b
s.fcsr = sfp.get_flags()
sfp.set_flags( 0 )
s.pc += 4
def execute_fmax_d( s, inst ):
a, b = s.fp[inst.rs1], s.fp[inst.rs2]
# TODO: s.fp[ inst.rd ] = sfp.isNaNF64UI(b) || ...
s.fp[ inst.rd ] = a if sfp.f64_le_quiet(b,a) else b
s.fcsr = sfp.get_flags()
sfp.set_flags( 0 )
s.pc += 4
def execute_fcvt_s_d( s, inst ):
s.fp[inst.rd] = sfp.f64_to_f32( s.fp[inst.rs1] )
s.fcsr = sfp.get_flags()
sfp.set_flags( 0 )
s.pc += 4
def execute_fcvt_d_s( s, inst ):
s.fp[inst.rd] = sfp.f32_to_f64( trim_32(s.fp[inst.rs1]) )
s.fcsr = sfp.get_flags()
sfp.set_flags( 0 )
s.pc += 4
def execute_feq_d( s, inst ):
a, b = s.fp[inst.rs1], s.fp[inst.rs2]
s.rf[ inst.rd ] = sfp.f64_eq( a, b )
s.fcsr = sfp.get_flags()
sfp.set_flags( 0 )
s.pc += 4
def execute_flt_d( s, inst ):
a, b = s.fp[inst.rs1], s.fp[inst.rs2]
s.rf[ inst.rd ] = sfp.f64_lt( a, b )
s.fcsr = sfp.get_flags()
sfp.set_flags( 0 )
s.pc += 4
def execute_fle_d( s, inst ):
a, b = s.fp[inst.rs1], s.fp[inst.rs2]
s.rf[ inst.rd ] = sfp.f64_le( a, b )
s.fcsr = sfp.get_flags()
sfp.set_flags( 0 )
s.pc += 4
def execute_fclass_d( s, inst ):
s.rf[inst.rd] = sfp.f64_classify( s.fp[inst.rs1] )
s.pc += 4
def execute_fcvt_w_d( s, inst ):
s.rf[inst.rd] = sext_32(sfp.f64_to_i32( s.fp[inst.rs1], inst.rm, True ))
s.fcsr = sfp.get_flags()
sfp.set_flags( 0 )
s.pc += 4
def execute_fcvt_wu_d( s, inst ):
s.rf[inst.rd] = sext_32(sfp.f64_to_ui32( s.fp[inst.rs1], inst.rm, True ))
s.fcsr = sfp.get_flags()
sfp.set_flags( 0 )
s.pc += 4
def execute_fcvt_d_w( s, inst ):
a = signed( s.rf[inst.rs1], 32 )
s.fp[inst.rd] = sfp.i32_to_f64( a )
s.fcsr = sfp.get_flags()
sfp.set_flags( 0 )
s.pc += 4
def execute_fcvt_d_wu( s, inst ):
a = trim_32(s.rf[inst.rs1])
s.fp[inst.rd] = sfp.ui32_to_f64( a )
s.fcsr = sfp.get_flags()
sfp.set_flags( 0 )
s.pc += 4
| bsd-3-clause |
OneBitSoftware/jwtSample | src/Spa/env1/Lib/site-packages/flask/testsuite/__init__.py | 564 | 7022 | # -*- coding: utf-8 -*-
"""
flask.testsuite
~~~~~~~~~~~~~~~
Tests Flask itself. The majority of Flask is already tested
as part of Werkzeug.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from __future__ import print_function
import os
import sys
import flask
import warnings
import unittest
from functools import update_wrapper
from contextlib import contextmanager
from werkzeug.utils import import_string, find_modules
from flask._compat import reraise, StringIO
def add_to_path(path):
"""Adds an entry to sys.path if it's not already there. This does
not append it but moves it to the front so that we can be sure it
is loaded.
"""
if not os.path.isdir(path):
        raise RuntimeError('Tried to add nonexistent path')
def _samefile(x, y):
if x == y:
return True
try:
return os.path.samefile(x, y)
except (IOError, OSError, AttributeError):
# Windows has no samefile
return False
sys.path[:] = [x for x in sys.path if not _samefile(path, x)]
sys.path.insert(0, path)
def iter_suites():
"""Yields all testsuites."""
for module in find_modules(__name__):
mod = import_string(module)
if hasattr(mod, 'suite'):
yield mod.suite()
def find_all_tests(suite):
"""Yields all the tests and their names from a given suite."""
suites = [suite]
while suites:
s = suites.pop()
try:
suites.extend(s)
except TypeError:
yield s, '%s.%s.%s' % (
s.__class__.__module__,
s.__class__.__name__,
s._testMethodName
)
@contextmanager
def catch_warnings():
"""Catch warnings in a with block in a list"""
# make sure deprecation warnings are active in tests
warnings.simplefilter('default', category=DeprecationWarning)
filters = warnings.filters
warnings.filters = filters[:]
old_showwarning = warnings.showwarning
log = []
def showwarning(message, category, filename, lineno, file=None, line=None):
log.append(locals())
try:
warnings.showwarning = showwarning
yield log
finally:
warnings.filters = filters
warnings.showwarning = old_showwarning
@contextmanager
def catch_stderr():
"""Catch stderr in a StringIO"""
old_stderr = sys.stderr
sys.stderr = rv = StringIO()
try:
yield rv
finally:
sys.stderr = old_stderr
def emits_module_deprecation_warning(f):
def new_f(self, *args, **kwargs):
with catch_warnings() as log:
f(self, *args, **kwargs)
self.assert_true(log, 'expected deprecation warning')
for entry in log:
self.assert_in('Modules are deprecated', str(entry['message']))
return update_wrapper(new_f, f)
class FlaskTestCase(unittest.TestCase):
"""Baseclass for all the tests that Flask uses. Use these methods
for testing instead of the camelcased ones in the baseclass for
consistency.
"""
def ensure_clean_request_context(self):
# make sure we're not leaking a request context since we are
# testing flask internally in debug mode in a few cases
leaks = []
while flask._request_ctx_stack.top is not None:
leaks.append(flask._request_ctx_stack.pop())
self.assert_equal(leaks, [])
def setup(self):
pass
def teardown(self):
pass
def setUp(self):
self.setup()
def tearDown(self):
unittest.TestCase.tearDown(self)
self.ensure_clean_request_context()
self.teardown()
def assert_equal(self, x, y):
return self.assertEqual(x, y)
def assert_raises(self, exc_type, callable=None, *args, **kwargs):
catcher = _ExceptionCatcher(self, exc_type)
if callable is None:
return catcher
with catcher:
callable(*args, **kwargs)
def assert_true(self, x, msg=None):
self.assertTrue(x, msg)
def assert_false(self, x, msg=None):
self.assertFalse(x, msg)
def assert_in(self, x, y):
self.assertIn(x, y)
def assert_not_in(self, x, y):
self.assertNotIn(x, y)
if sys.version_info[:2] == (2, 6):
def assertIn(self, x, y):
assert x in y, "%r unexpectedly not in %r" % (x, y)
def assertNotIn(self, x, y):
assert x not in y, "%r unexpectedly in %r" % (x, y)
class _ExceptionCatcher(object):
def __init__(self, test_case, exc_type):
self.test_case = test_case
self.exc_type = exc_type
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
exception_name = self.exc_type.__name__
if exc_type is None:
self.test_case.fail('Expected exception of type %r' %
exception_name)
elif not issubclass(exc_type, self.exc_type):
reraise(exc_type, exc_value, tb)
return True
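# Usage note (editorial, not part of Flask's testsuite): assert_raises above works
# both directly, e.g. self.assert_raises(ValueError, int, 'x'), and as a context
# manager when no callable is given:
#
#     with self.assert_raises(ValueError):
#         int('x')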
class BetterLoader(unittest.TestLoader):
"""A nicer loader that solves two problems. First of all we are setting
up tests from different sources and we're doing this programmatically
which breaks the default loading logic so this is required anyways.
Secondly this loader has a nicer interpolation for test names than the
default one so you can just do ``run-tests.py ViewTestCase`` and it
will work.
"""
def getRootSuite(self):
return suite()
def loadTestsFromName(self, name, module=None):
root = self.getRootSuite()
if name == 'suite':
return root
all_tests = []
for testcase, testname in find_all_tests(root):
if testname == name or \
testname.endswith('.' + name) or \
('.' + name + '.') in testname or \
testname.startswith(name + '.'):
all_tests.append(testcase)
if not all_tests:
raise LookupError('could not find test case for "%s"' % name)
if len(all_tests) == 1:
return all_tests[0]
rv = unittest.TestSuite()
for test in all_tests:
rv.addTest(test)
return rv
def setup_path():
add_to_path(os.path.abspath(os.path.join(
os.path.dirname(__file__), 'test_apps')))
def suite():
"""A testsuite that has all the Flask tests. You can use this
function to integrate the Flask tests into your own testsuite
in case you want to test that monkeypatches to Flask do not
break it.
"""
setup_path()
suite = unittest.TestSuite()
for other_suite in iter_suites():
suite.addTest(other_suite)
return suite
def main():
"""Runs the testsuite as command line application."""
try:
unittest.main(testLoader=BetterLoader(), defaultTest='suite')
except Exception as e:
print('Error: %s' % e)
| mit |
pe-suke/ansible | lib/ansible/plugins/connections/accelerate.py | 140 | 15736 | # (c) 2012, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
import os
import base64
import socket
import struct
import time
from ansible.callbacks import vvv, vvvv
from ansible.errors import AnsibleError, AnsibleFileNotFound
from . import ConnectionBase
from .ssh import Connection as SSHConnection
from .paramiko_ssh import Connection as ParamikoConnection
from ansible import utils
from ansible import constants
# the chunk size to read and send, assuming mtu 1500 and
# leaving room for base64 (+33%) encoding and header (8 bytes)
# ((1400-8)/4)*3) = 1044
# which leaves room for the TCP/IP header. We set this to a
# multiple of the value to speed up file reads.
CHUNK_SIZE=1044*20
class Connection(ConnectionBase):
''' raw socket accelerated connection '''
def __init__(self, runner, host, port, user, password, private_key_file, *args, **kwargs):
self.runner = runner
self.host = host
self.context = None
self.conn = None
self.user = user
self.key = utils.key_for_hostname(host)
self.port = port[0]
self.accport = port[1]
self.is_connected = False
self.has_pipelining = False
self.become_methods_supported=['sudo']
if not self.port:
self.port = constants.DEFAULT_REMOTE_PORT
elif not isinstance(self.port, int):
self.port = int(self.port)
if not self.accport:
self.accport = constants.ACCELERATE_PORT
elif not isinstance(self.accport, int):
self.accport = int(self.accport)
if self.runner.original_transport == "paramiko":
self.ssh = ParamikoConnection(
runner=self.runner,
host=self.host,
port=self.port,
user=self.user,
password=password,
private_key_file=private_key_file
)
else:
self.ssh = SSHConnection(
runner=self.runner,
host=self.host,
port=self.port,
user=self.user,
password=password,
private_key_file=private_key_file
)
if not getattr(self.ssh, 'shell', None):
self.ssh.shell = utils.plugins.shell_loader.get('sh')
# attempt to work around shared-memory funness
if getattr(self.runner, 'aes_keys', None):
utils.AES_KEYS = self.runner.aes_keys
@property
def transport(self):
"""String used to identify this Connection class from other classes"""
return 'accelerate'
def _execute_accelerate_module(self):
args = "password=%s port=%s minutes=%d debug=%d ipv6=%s" % (
base64.b64encode(self.key.__str__()),
str(self.accport),
constants.ACCELERATE_DAEMON_TIMEOUT,
int(utils.VERBOSITY),
self.runner.accelerate_ipv6,
)
if constants.ACCELERATE_MULTI_KEY:
args += " multi_key=yes"
inject = dict(password=self.key)
if getattr(self.runner, 'accelerate_inventory_host', False):
inject = utils.combine_vars(inject, self.runner.inventory.get_variables(self.runner.accelerate_inventory_host))
else:
inject = utils.combine_vars(inject, self.runner.inventory.get_variables(self.host))
vvvv("attempting to start up the accelerate daemon...")
self.ssh.connect()
tmp_path = self.runner._make_tmp_path(self.ssh)
return self.runner._execute_module(self.ssh, tmp_path, 'accelerate', args, inject=inject)
def connect(self, allow_ssh=True):
''' activates the connection object '''
try:
if not self.is_connected:
wrong_user = False
tries = 3
self.conn = socket.socket()
self.conn.settimeout(constants.ACCELERATE_CONNECT_TIMEOUT)
vvvv("attempting connection to %s via the accelerated port %d" % (self.host,self.accport))
while tries > 0:
try:
self.conn.connect((self.host,self.accport))
break
except socket.error:
vvvv("connection to %s failed, retrying..." % self.host)
time.sleep(0.1)
tries -= 1
if tries == 0:
vvv("Could not connect via the accelerated connection, exceeded # of tries")
raise AnsibleError("FAILED")
elif wrong_user:
vvv("Restarting daemon with a different remote_user")
raise AnsibleError("WRONG_USER")
self.conn.settimeout(constants.ACCELERATE_TIMEOUT)
if not self.validate_user():
# the accelerated daemon was started with a
# different remote_user. The above command
# should have caused the accelerate daemon to
# shutdown, so we'll reconnect.
wrong_user = True
except AnsibleError as e:
if allow_ssh:
if "WRONG_USER" in e:
vvv("Switching users, waiting for the daemon on %s to shutdown completely..." % self.host)
time.sleep(5)
vvv("Falling back to ssh to startup accelerated mode")
res = self._execute_accelerate_module()
if not res.is_successful():
raise AnsibleError("Failed to launch the accelerated daemon on %s (reason: %s)" % (self.host,res.result.get('msg')))
return self.connect(allow_ssh=False)
else:
raise AnsibleError("Failed to connect to %s:%s" % (self.host,self.accport))
self.is_connected = True
return self
def send_data(self, data):
packed_len = struct.pack('!Q',len(data))
return self.conn.sendall(packed_len + data)
def recv_data(self):
header_len = 8 # size of a packed unsigned long long
data = b""
try:
vvvv("%s: in recv_data(), waiting for the header" % self.host)
while len(data) < header_len:
d = self.conn.recv(header_len - len(data))
if not d:
vvvv("%s: received nothing, bailing out" % self.host)
return None
data += d
vvvv("%s: got the header, unpacking" % self.host)
data_len = struct.unpack('!Q',data[:header_len])[0]
data = data[header_len:]
vvvv("%s: data received so far (expecting %d): %d" % (self.host,data_len,len(data)))
while len(data) < data_len:
d = self.conn.recv(data_len - len(data))
if not d:
vvvv("%s: received nothing, bailing out" % self.host)
return None
vvvv("%s: received %d bytes" % (self.host, len(d)))
data += d
vvvv("%s: received all of the data, returning" % self.host)
return data
except socket.timeout:
raise AnsibleError("timed out while waiting to receive data")
def validate_user(self):
'''
Checks the remote uid of the accelerated daemon vs. the
one specified for this play and will cause the accel
daemon to exit if they don't match
'''
vvvv("%s: sending request for validate_user" % self.host)
data = dict(
mode='validate_user',
username=self.user,
)
data = utils.jsonify(data)
data = utils.encrypt(self.key, data)
if self.send_data(data):
raise AnsibleError("Failed to send command to %s" % self.host)
vvvv("%s: waiting for validate_user response" % self.host)
while True:
# we loop here while waiting for the response, because a
# long running command may cause us to receive keepalive packets
# ({"pong":"true"}) rather than the response we want.
response = self.recv_data()
if not response:
raise AnsibleError("Failed to get a response from %s" % self.host)
response = utils.decrypt(self.key, response)
response = utils.parse_json(response)
if "pong" in response:
# it's a keepalive, go back to waiting
vvvv("%s: received a keepalive packet" % self.host)
continue
else:
vvvv("%s: received the validate_user response: %s" % (self.host, response))
break
if response.get('failed'):
return False
else:
return response.get('rc') == 0
def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable='/bin/sh', in_data=None):
''' run a command on the remote host '''
if sudoable and self.runner.become and self.runner.become_method not in self.become_methods_supported:
            raise AnsibleError("Internal Error: this module does not support running commands via %s" % self.runner.become_method)
if in_data:
raise AnsibleError("Internal Error: this module does not support optimized module pipelining")
if executable == "":
executable = constants.DEFAULT_EXECUTABLE
if self.runner.become and sudoable:
cmd, prompt, success_key = utils.make_become_cmd(cmd, become_user, executable, self.runner.become_method, '', self.runner.become_exe)
vvv("EXEC COMMAND %s" % cmd)
data = dict(
mode='command',
cmd=cmd,
tmp_path=tmp_path,
executable=executable,
)
data = utils.jsonify(data)
data = utils.encrypt(self.key, data)
if self.send_data(data):
raise AnsibleError("Failed to send command to %s" % self.host)
while True:
# we loop here while waiting for the response, because a
# long running command may cause us to receive keepalive packets
# ({"pong":"true"}) rather than the response we want.
response = self.recv_data()
if not response:
raise AnsibleError("Failed to get a response from %s" % self.host)
response = utils.decrypt(self.key, response)
response = utils.parse_json(response)
if "pong" in response:
# it's a keepalive, go back to waiting
vvvv("%s: received a keepalive packet" % self.host)
continue
else:
vvvv("%s: received the response" % self.host)
break
return (response.get('rc',None), '', response.get('stdout',''), response.get('stderr',''))
def put_file(self, in_path, out_path):
''' transfer a file from local to remote '''
vvv("PUT %s TO %s" % (in_path, out_path), host=self.host)
if not os.path.exists(in_path):
raise AnsibleFileNotFound("file or module does not exist: %s" % in_path)
        fd = open(in_path, 'rb')
fstat = os.stat(in_path)
try:
vvv("PUT file is %d bytes" % fstat.st_size)
last = False
while fd.tell() <= fstat.st_size and not last:
vvvv("file position currently %ld, file size is %ld" % (fd.tell(), fstat.st_size))
data = fd.read(CHUNK_SIZE)
if fd.tell() >= fstat.st_size:
last = True
data = dict(mode='put', data=base64.b64encode(data), out_path=out_path, last=last)
if self.runner.become:
data['user'] = self.runner.become_user
data = utils.jsonify(data)
data = utils.encrypt(self.key, data)
if self.send_data(data):
raise AnsibleError("failed to send the file to %s" % self.host)
response = self.recv_data()
if not response:
raise AnsibleError("Failed to get a response from %s" % self.host)
response = utils.decrypt(self.key, response)
response = utils.parse_json(response)
if response.get('failed',False):
raise AnsibleError("failed to put the file in the requested location")
finally:
fd.close()
vvvv("waiting for final response after PUT")
response = self.recv_data()
if not response:
raise AnsibleError("Failed to get a response from %s" % self.host)
response = utils.decrypt(self.key, response)
response = utils.parse_json(response)
if response.get('failed',False):
raise AnsibleError("failed to put the file in the requested location")
def fetch_file(self, in_path, out_path):
''' save a remote file to the specified path '''
vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host)
data = dict(mode='fetch', in_path=in_path)
data = utils.jsonify(data)
data = utils.encrypt(self.key, data)
if self.send_data(data):
raise AnsibleError("failed to initiate the file fetch with %s" % self.host)
fh = open(out_path, "w")
try:
bytes = 0
while True:
response = self.recv_data()
if not response:
raise AnsibleError("Failed to get a response from %s" % self.host)
response = utils.decrypt(self.key, response)
response = utils.parse_json(response)
if response.get('failed', False):
raise AnsibleError("Error during file fetch, aborting")
out = base64.b64decode(response['data'])
fh.write(out)
bytes += len(out)
# send an empty response back to signify we
# received the last chunk without errors
data = utils.jsonify(dict())
data = utils.encrypt(self.key, data)
if self.send_data(data):
raise AnsibleError("failed to send ack during file fetch")
if response.get('last', False):
break
finally:
# we don't currently care about this final response,
# we just receive it and drop it. It may be used at some
# point in the future or we may just have the put/fetch
# operations not send back a final response at all
response = self.recv_data()
vvv("FETCH wrote %d bytes to %s" % (bytes, out_path))
fh.close()
def close(self):
''' terminate the connection '''
# Be a good citizen
try:
self.conn.close()
except:
pass
| gpl-3.0 |
neilpelow/wmap-django | venv/lib/python3.5/site-packages/pip/_vendor/requests/packages/chardet/latin1prober.py | 1778 | 5232 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetprober import CharSetProber
from .constants import eNotMe
from .compat import wrap_ord
FREQ_CAT_NUM = 4
UDF = 0 # undefined
OTH = 1 # other
ASC = 2 # ascii capital letter
ASS = 3 # ascii small letter
ACV = 4 # accent capital vowel
ACO = 5 # accent capital other
ASV = 6 # accent small vowel
ASO = 7 # accent small other
CLASS_NUM = 8 # total classes
Latin1_CharToClass = (
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 00 - 07
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 08 - 0F
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 10 - 17
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 18 - 1F
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 20 - 27
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 28 - 2F
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 30 - 37
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 38 - 3F
OTH, ASC, ASC, ASC, ASC, ASC, ASC, ASC, # 40 - 47
ASC, ASC, ASC, ASC, ASC, ASC, ASC, ASC, # 48 - 4F
ASC, ASC, ASC, ASC, ASC, ASC, ASC, ASC, # 50 - 57
ASC, ASC, ASC, OTH, OTH, OTH, OTH, OTH, # 58 - 5F
OTH, ASS, ASS, ASS, ASS, ASS, ASS, ASS, # 60 - 67
ASS, ASS, ASS, ASS, ASS, ASS, ASS, ASS, # 68 - 6F
ASS, ASS, ASS, ASS, ASS, ASS, ASS, ASS, # 70 - 77
ASS, ASS, ASS, OTH, OTH, OTH, OTH, OTH, # 78 - 7F
OTH, UDF, OTH, ASO, OTH, OTH, OTH, OTH, # 80 - 87
OTH, OTH, ACO, OTH, ACO, UDF, ACO, UDF, # 88 - 8F
UDF, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 90 - 97
OTH, OTH, ASO, OTH, ASO, UDF, ASO, ACO, # 98 - 9F
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # A0 - A7
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # A8 - AF
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # B0 - B7
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # B8 - BF
ACV, ACV, ACV, ACV, ACV, ACV, ACO, ACO, # C0 - C7
ACV, ACV, ACV, ACV, ACV, ACV, ACV, ACV, # C8 - CF
ACO, ACO, ACV, ACV, ACV, ACV, ACV, OTH, # D0 - D7
ACV, ACV, ACV, ACV, ACV, ACO, ACO, ACO, # D8 - DF
ASV, ASV, ASV, ASV, ASV, ASV, ASO, ASO, # E0 - E7
ASV, ASV, ASV, ASV, ASV, ASV, ASV, ASV, # E8 - EF
ASO, ASO, ASV, ASV, ASV, ASV, ASV, OTH, # F0 - F7
ASV, ASV, ASV, ASV, ASV, ASO, ASO, ASO, # F8 - FF
)
# 0 : illegal
# 1 : very unlikely
# 2 : normal
# 3 : very likely
Latin1ClassModel = (
# UDF OTH ASC ASS ACV ACO ASV ASO
0, 0, 0, 0, 0, 0, 0, 0, # UDF
0, 3, 3, 3, 3, 3, 3, 3, # OTH
0, 3, 3, 3, 3, 3, 3, 3, # ASC
0, 3, 3, 3, 1, 1, 3, 3, # ASS
0, 3, 3, 3, 1, 2, 1, 2, # ACV
0, 3, 3, 3, 3, 3, 3, 3, # ACO
0, 3, 1, 3, 1, 1, 1, 3, # ASV
0, 3, 1, 3, 1, 1, 3, 3, # ASO
)
class Latin1Prober(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self.reset()
def reset(self):
self._mLastCharClass = OTH
self._mFreqCounter = [0] * FREQ_CAT_NUM
CharSetProber.reset(self)
def get_charset_name(self):
return "windows-1252"
def feed(self, aBuf):
aBuf = self.filter_with_english_letters(aBuf)
for c in aBuf:
charClass = Latin1_CharToClass[wrap_ord(c)]
freq = Latin1ClassModel[(self._mLastCharClass * CLASS_NUM)
+ charClass]
if freq == 0:
self._mState = eNotMe
break
self._mFreqCounter[freq] += 1
self._mLastCharClass = charClass
return self.get_state()
def get_confidence(self):
if self.get_state() == eNotMe:
return 0.01
total = sum(self._mFreqCounter)
if total < 0.01:
confidence = 0.0
else:
confidence = ((self._mFreqCounter[3] - self._mFreqCounter[1] * 20.0)
/ total)
if confidence < 0.0:
confidence = 0.0
# lower the confidence of latin1 so that other more accurate
# detector can take priority.
confidence = confidence * 0.73
return confidence
| gpl-3.0 |
DreadPirateRobert/stock_visualiser | stock_visualiser_virtualenv/lib/python3.5/site-packages/pip/_vendor/distlib/_backport/tarfile.py | 422 | 92628 | #-------------------------------------------------------------------
# tarfile.py
#-------------------------------------------------------------------
# Copyright (C) 2002 Lars Gustaebel <[email protected]>
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
from __future__ import print_function
"""Read from and write to tar format archives.
"""
__version__ = "$Revision$"
version = "0.9.0"
__author__ = "Lars Gust\u00e4bel ([email protected])"
__date__ = "$Date: 2011-02-25 17:42:01 +0200 (Fri, 25 Feb 2011) $"
__cvsid__ = "$Id: tarfile.py 88586 2011-02-25 15:42:01Z marc-andre.lemburg $"
__credits__ = "Gustavo Niemeyer, Niels Gust\u00e4bel, Richard Townsend."
#---------
# Imports
#---------
import sys
import os
import stat
import errno
import time
import struct
import copy
import re
try:
import grp, pwd
except ImportError:
grp = pwd = None
# os.symlink on Windows prior to 6.0 raises NotImplementedError
symlink_exception = (AttributeError, NotImplementedError)
try:
# WindowsError (1314) will be raised if the caller does not hold the
# SeCreateSymbolicLinkPrivilege privilege
symlink_exception += (WindowsError,)
except NameError:
pass
# from tarfile import *
__all__ = ["TarFile", "TarInfo", "is_tarfile", "TarError"]
if sys.version_info[0] < 3:
import __builtin__ as builtins
else:
import builtins
_open = builtins.open # Since 'open' is TarFile.open
#---------------------------------------------------------
# tar constants
#---------------------------------------------------------
NUL = b"\0" # the null character
BLOCKSIZE = 512 # length of processing blocks
RECORDSIZE = BLOCKSIZE * 20 # length of records
GNU_MAGIC = b"ustar \0" # magic gnu tar string
POSIX_MAGIC = b"ustar\x0000" # magic posix tar string
LENGTH_NAME = 100 # maximum length of a filename
LENGTH_LINK = 100 # maximum length of a linkname
LENGTH_PREFIX = 155 # maximum length of the prefix field
REGTYPE = b"0" # regular file
AREGTYPE = b"\0" # regular file
LNKTYPE = b"1" # link (inside tarfile)
SYMTYPE = b"2" # symbolic link
CHRTYPE = b"3" # character special device
BLKTYPE = b"4" # block special device
DIRTYPE = b"5" # directory
FIFOTYPE = b"6" # fifo special device
CONTTYPE = b"7" # contiguous file
GNUTYPE_LONGNAME = b"L" # GNU tar longname
GNUTYPE_LONGLINK = b"K" # GNU tar longlink
GNUTYPE_SPARSE = b"S" # GNU tar sparse file
XHDTYPE = b"x" # POSIX.1-2001 extended header
XGLTYPE = b"g" # POSIX.1-2001 global header
SOLARIS_XHDTYPE = b"X" # Solaris extended header
USTAR_FORMAT = 0 # POSIX.1-1988 (ustar) format
GNU_FORMAT = 1 # GNU tar format
PAX_FORMAT = 2 # POSIX.1-2001 (pax) format
DEFAULT_FORMAT = GNU_FORMAT
#---------------------------------------------------------
# tarfile constants
#---------------------------------------------------------
# File types that tarfile supports:
SUPPORTED_TYPES = (REGTYPE, AREGTYPE, LNKTYPE,
SYMTYPE, DIRTYPE, FIFOTYPE,
CONTTYPE, CHRTYPE, BLKTYPE,
GNUTYPE_LONGNAME, GNUTYPE_LONGLINK,
GNUTYPE_SPARSE)
# File types that will be treated as a regular file.
REGULAR_TYPES = (REGTYPE, AREGTYPE,
CONTTYPE, GNUTYPE_SPARSE)
# File types that are part of the GNU tar format.
GNU_TYPES = (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK,
GNUTYPE_SPARSE)
# Fields from a pax header that override a TarInfo attribute.
PAX_FIELDS = ("path", "linkpath", "size", "mtime",
"uid", "gid", "uname", "gname")
# Fields from a pax header that are affected by hdrcharset.
PAX_NAME_FIELDS = set(("path", "linkpath", "uname", "gname"))
# Fields in a pax header that are numbers, all other fields
# are treated as strings.
PAX_NUMBER_FIELDS = {
"atime": float,
"ctime": float,
"mtime": float,
"uid": int,
"gid": int,
"size": int
}
#---------------------------------------------------------
# Bits used in the mode field, values in octal.
#---------------------------------------------------------
S_IFLNK = 0o120000 # symbolic link
S_IFREG = 0o100000 # regular file
S_IFBLK = 0o060000 # block device
S_IFDIR = 0o040000 # directory
S_IFCHR = 0o020000 # character device
S_IFIFO = 0o010000 # fifo
TSUID = 0o4000 # set UID on execution
TSGID = 0o2000 # set GID on execution
TSVTX = 0o1000 # reserved
TUREAD = 0o400 # read by owner
TUWRITE = 0o200 # write by owner
TUEXEC = 0o100 # execute/search by owner
TGREAD = 0o040 # read by group
TGWRITE = 0o020 # write by group
TGEXEC = 0o010 # execute/search by group
TOREAD = 0o004 # read by other
TOWRITE = 0o002 # write by other
TOEXEC = 0o001 # execute/search by other
#---------------------------------------------------------
# initialization
#---------------------------------------------------------
if os.name in ("nt", "ce"):
ENCODING = "utf-8"
else:
ENCODING = sys.getfilesystemencoding()
#---------------------------------------------------------
# Some useful functions
#---------------------------------------------------------
def stn(s, length, encoding, errors):
"""Convert a string to a null-terminated bytes object.
"""
s = s.encode(encoding, errors)
return s[:length] + (length - len(s)) * NUL
def nts(s, encoding, errors):
"""Convert a null-terminated bytes object to a string.
"""
p = s.find(b"\0")
if p != -1:
s = s[:p]
return s.decode(encoding, errors)
def nti(s):
"""Convert a number field to a python number.
"""
# There are two possible encodings for a number field, see
# itn() below.
if s[0] != chr(0o200):
try:
n = int(nts(s, "ascii", "strict") or "0", 8)
except ValueError:
raise InvalidHeaderError("invalid header")
else:
n = 0
for i in range(len(s) - 1):
n <<= 8
n += ord(s[i + 1])
return n
def itn(n, digits=8, format=DEFAULT_FORMAT):
"""Convert a python number to a number field.
"""
# POSIX 1003.1-1988 requires numbers to be encoded as a string of
# octal digits followed by a null-byte, this allows values up to
# (8**(digits-1))-1. GNU tar allows storing numbers greater than
# that if necessary. A leading 0o200 byte indicates this particular
# encoding, the following digits-1 bytes are a big-endian
# representation. This allows values up to (256**(digits-1))-1.
if 0 <= n < 8 ** (digits - 1):
s = ("%0*o" % (digits - 1, n)).encode("ascii") + NUL
else:
if format != GNU_FORMAT or n >= 256 ** (digits - 1):
raise ValueError("overflow in number field")
if n < 0:
# XXX We mimic GNU tar's behaviour with negative numbers,
# this could raise OverflowError.
n = struct.unpack("L", struct.pack("l", n))[0]
s = bytearray()
for i in range(digits - 1):
s.insert(0, n & 0o377)
n >>= 8
s.insert(0, 0o200)
return s
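# Worked example (editorial, not part of the original module): with the default
# digits=8, small values use the octal form, e.g. itn(511) == b"0000777\x00"
# (seven octal digits plus NUL) and nti(b"0000777\x00") == 511. Values that do not
# fit in seven octal digits fall back to the GNU base-256 form: a leading 0o200
# byte followed by digits-1 big-endian bytes.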
def calc_chksums(buf):
"""Calculate the checksum for a member's header by summing up all
characters except for the chksum field which is treated as if
it was filled with spaces. According to the GNU tar sources,
some tars (Sun and NeXT) calculate chksum with signed char,
which will be different if there are chars in the buffer with
the high bit set. So we calculate two checksums, unsigned and
signed.
"""
unsigned_chksum = 256 + sum(struct.unpack("148B", buf[:148]) + struct.unpack("356B", buf[156:512]))
signed_chksum = 256 + sum(struct.unpack("148b", buf[:148]) + struct.unpack("356b", buf[156:512]))
return unsigned_chksum, signed_chksum
def copyfileobj(src, dst, length=None):
"""Copy length bytes from fileobj src to fileobj dst.
If length is None, copy the entire content.
"""
if length == 0:
return
if length is None:
while True:
buf = src.read(16*1024)
if not buf:
break
dst.write(buf)
return
BUFSIZE = 16 * 1024
blocks, remainder = divmod(length, BUFSIZE)
for b in range(blocks):
buf = src.read(BUFSIZE)
if len(buf) < BUFSIZE:
raise IOError("end of file reached")
dst.write(buf)
if remainder != 0:
buf = src.read(remainder)
if len(buf) < remainder:
raise IOError("end of file reached")
dst.write(buf)
return
filemode_table = (
((S_IFLNK, "l"),
(S_IFREG, "-"),
(S_IFBLK, "b"),
(S_IFDIR, "d"),
(S_IFCHR, "c"),
(S_IFIFO, "p")),
((TUREAD, "r"),),
((TUWRITE, "w"),),
((TUEXEC|TSUID, "s"),
(TSUID, "S"),
(TUEXEC, "x")),
((TGREAD, "r"),),
((TGWRITE, "w"),),
((TGEXEC|TSGID, "s"),
(TSGID, "S"),
(TGEXEC, "x")),
((TOREAD, "r"),),
((TOWRITE, "w"),),
((TOEXEC|TSVTX, "t"),
(TSVTX, "T"),
(TOEXEC, "x"))
)
def filemode(mode):
"""Convert a file's mode to a string of the form
-rwxrwxrwx.
Used by TarFile.list()
"""
perm = []
for table in filemode_table:
for bit, char in table:
if mode & bit == bit:
perm.append(char)
break
else:
perm.append("-")
return "".join(perm)
class TarError(Exception):
"""Base exception."""
pass
class ExtractError(TarError):
"""General exception for extract errors."""
pass
class ReadError(TarError):
"""Exception for unreadable tar archives."""
pass
class CompressionError(TarError):
"""Exception for unavailable compression methods."""
pass
class StreamError(TarError):
"""Exception for unsupported operations on stream-like TarFiles."""
pass
class HeaderError(TarError):
"""Base exception for header errors."""
pass
class EmptyHeaderError(HeaderError):
"""Exception for empty headers."""
pass
class TruncatedHeaderError(HeaderError):
"""Exception for truncated headers."""
pass
class EOFHeaderError(HeaderError):
"""Exception for end of file headers."""
pass
class InvalidHeaderError(HeaderError):
"""Exception for invalid headers."""
pass
class SubsequentHeaderError(HeaderError):
"""Exception for missing and invalid extended headers."""
pass
#---------------------------
# internal stream interface
#---------------------------
class _LowLevelFile(object):
"""Low-level file object. Supports reading and writing.
It is used instead of a regular file object for streaming
access.
"""
def __init__(self, name, mode):
mode = {
"r": os.O_RDONLY,
"w": os.O_WRONLY | os.O_CREAT | os.O_TRUNC,
}[mode]
if hasattr(os, "O_BINARY"):
mode |= os.O_BINARY
self.fd = os.open(name, mode, 0o666)
def close(self):
os.close(self.fd)
def read(self, size):
return os.read(self.fd, size)
def write(self, s):
os.write(self.fd, s)
class _Stream(object):
"""Class that serves as an adapter between TarFile and
a stream-like object. The stream-like object only
needs to have a read() or write() method and is accessed
blockwise. Use of gzip or bzip2 compression is possible.
A stream-like object could be for example: sys.stdin,
sys.stdout, a socket, a tape device etc.
_Stream is intended to be used only internally.
"""
def __init__(self, name, mode, comptype, fileobj, bufsize):
"""Construct a _Stream object.
"""
self._extfileobj = True
if fileobj is None:
fileobj = _LowLevelFile(name, mode)
self._extfileobj = False
if comptype == '*':
# Enable transparent compression detection for the
# stream interface
fileobj = _StreamProxy(fileobj)
comptype = fileobj.getcomptype()
self.name = name or ""
self.mode = mode
self.comptype = comptype
self.fileobj = fileobj
self.bufsize = bufsize
self.buf = b""
self.pos = 0
self.closed = False
try:
if comptype == "gz":
try:
import zlib
except ImportError:
raise CompressionError("zlib module is not available")
self.zlib = zlib
self.crc = zlib.crc32(b"")
if mode == "r":
self._init_read_gz()
else:
self._init_write_gz()
if comptype == "bz2":
try:
import bz2
except ImportError:
raise CompressionError("bz2 module is not available")
if mode == "r":
self.dbuf = b""
self.cmp = bz2.BZ2Decompressor()
else:
self.cmp = bz2.BZ2Compressor()
except:
if not self._extfileobj:
self.fileobj.close()
self.closed = True
raise
def __del__(self):
if hasattr(self, "closed") and not self.closed:
self.close()
def _init_write_gz(self):
"""Initialize for writing with gzip compression.
"""
self.cmp = self.zlib.compressobj(9, self.zlib.DEFLATED,
-self.zlib.MAX_WBITS,
self.zlib.DEF_MEM_LEVEL,
0)
timestamp = struct.pack("<L", int(time.time()))
self.__write(b"\037\213\010\010" + timestamp + b"\002\377")
if self.name.endswith(".gz"):
self.name = self.name[:-3]
# RFC1952 says we must use ISO-8859-1 for the FNAME field.
self.__write(self.name.encode("iso-8859-1", "replace") + NUL)
def write(self, s):
"""Write string s to the stream.
"""
if self.comptype == "gz":
self.crc = self.zlib.crc32(s, self.crc)
self.pos += len(s)
if self.comptype != "tar":
s = self.cmp.compress(s)
self.__write(s)
def __write(self, s):
"""Write string s to the stream if a whole new block
is ready to be written.
"""
self.buf += s
while len(self.buf) > self.bufsize:
self.fileobj.write(self.buf[:self.bufsize])
self.buf = self.buf[self.bufsize:]
def close(self):
"""Close the _Stream object. No operation should be
done on it afterwards.
"""
if self.closed:
return
if self.mode == "w" and self.comptype != "tar":
self.buf += self.cmp.flush()
if self.mode == "w" and self.buf:
self.fileobj.write(self.buf)
self.buf = b""
if self.comptype == "gz":
# The native zlib crc is an unsigned 32-bit integer, but
# the Python wrapper implicitly casts that to a signed C
# long. So, on a 32-bit box self.crc may "look negative",
# while the same crc on a 64-bit box may "look positive".
# To avoid irksome warnings from the `struct` module, force
# it to look positive on all boxes.
self.fileobj.write(struct.pack("<L", self.crc & 0xffffffff))
self.fileobj.write(struct.pack("<L", self.pos & 0xffffFFFF))
if not self._extfileobj:
self.fileobj.close()
self.closed = True
def _init_read_gz(self):
"""Initialize for reading a gzip compressed fileobj.
"""
self.cmp = self.zlib.decompressobj(-self.zlib.MAX_WBITS)
self.dbuf = b""
# taken from gzip.GzipFile with some alterations
if self.__read(2) != b"\037\213":
raise ReadError("not a gzip file")
if self.__read(1) != b"\010":
raise CompressionError("unsupported compression method")
flag = ord(self.__read(1))
self.__read(6)
if flag & 4:
xlen = ord(self.__read(1)) + 256 * ord(self.__read(1))
self.read(xlen)
if flag & 8:
while True:
s = self.__read(1)
if not s or s == NUL:
break
if flag & 16:
while True:
s = self.__read(1)
if not s or s == NUL:
break
if flag & 2:
self.__read(2)
def tell(self):
"""Return the stream's file pointer position.
"""
return self.pos
def seek(self, pos=0):
"""Set the stream's file pointer to pos. Negative seeking
is forbidden.
"""
if pos - self.pos >= 0:
blocks, remainder = divmod(pos - self.pos, self.bufsize)
for i in range(blocks):
self.read(self.bufsize)
self.read(remainder)
else:
raise StreamError("seeking backwards is not allowed")
return self.pos
def read(self, size=None):
"""Return the next size number of bytes from the stream.
If size is not defined, return all bytes of the stream
up to EOF.
"""
if size is None:
t = []
while True:
buf = self._read(self.bufsize)
if not buf:
break
t.append(buf)
buf = "".join(t)
else:
buf = self._read(size)
self.pos += len(buf)
return buf
def _read(self, size):
"""Return size bytes from the stream.
"""
if self.comptype == "tar":
return self.__read(size)
c = len(self.dbuf)
while c < size:
buf = self.__read(self.bufsize)
if not buf:
break
try:
buf = self.cmp.decompress(buf)
except IOError:
raise ReadError("invalid compressed data")
self.dbuf += buf
c += len(buf)
buf = self.dbuf[:size]
self.dbuf = self.dbuf[size:]
return buf
def __read(self, size):
"""Return size bytes from stream. If internal buffer is empty,
read another block from the stream.
"""
c = len(self.buf)
while c < size:
buf = self.fileobj.read(self.bufsize)
if not buf:
break
self.buf += buf
c += len(buf)
buf = self.buf[:size]
self.buf = self.buf[size:]
return buf
# class _Stream
class _StreamProxy(object):
"""Small proxy class that enables transparent compression
detection for the Stream interface (mode 'r|*').
"""
def __init__(self, fileobj):
self.fileobj = fileobj
self.buf = self.fileobj.read(BLOCKSIZE)
def read(self, size):
self.read = self.fileobj.read
return self.buf
def getcomptype(self):
if self.buf.startswith(b"\037\213\010"):
return "gz"
if self.buf.startswith(b"BZh91"):
return "bz2"
return "tar"
def close(self):
self.fileobj.close()
# class StreamProxy
class _BZ2Proxy(object):
"""Small proxy class that enables external file object
support for "r:bz2" and "w:bz2" modes. This is actually
a workaround for a limitation in bz2 module's BZ2File
class which (unlike gzip.GzipFile) has no support for
a file object argument.
"""
blocksize = 16 * 1024
def __init__(self, fileobj, mode):
self.fileobj = fileobj
self.mode = mode
self.name = getattr(self.fileobj, "name", None)
self.init()
def init(self):
import bz2
self.pos = 0
if self.mode == "r":
self.bz2obj = bz2.BZ2Decompressor()
self.fileobj.seek(0)
self.buf = b""
else:
self.bz2obj = bz2.BZ2Compressor()
def read(self, size):
x = len(self.buf)
while x < size:
raw = self.fileobj.read(self.blocksize)
if not raw:
break
data = self.bz2obj.decompress(raw)
self.buf += data
x += len(data)
buf = self.buf[:size]
self.buf = self.buf[size:]
self.pos += len(buf)
return buf
def seek(self, pos):
if pos < self.pos:
self.init()
self.read(pos - self.pos)
def tell(self):
return self.pos
def write(self, data):
self.pos += len(data)
raw = self.bz2obj.compress(data)
self.fileobj.write(raw)
def close(self):
if self.mode == "w":
raw = self.bz2obj.flush()
self.fileobj.write(raw)
# class _BZ2Proxy
#------------------------
# Extraction file object
#------------------------
class _FileInFile(object):
"""A thin wrapper around an existing file object that
provides a part of its data as an individual file
object.
"""
def __init__(self, fileobj, offset, size, blockinfo=None):
self.fileobj = fileobj
self.offset = offset
self.size = size
self.position = 0
if blockinfo is None:
blockinfo = [(0, size)]
# Construct a map with data and zero blocks.
self.map_index = 0
self.map = []
lastpos = 0
realpos = self.offset
for offset, size in blockinfo:
if offset > lastpos:
self.map.append((False, lastpos, offset, None))
self.map.append((True, offset, offset + size, realpos))
realpos += size
lastpos = offset + size
if lastpos < self.size:
self.map.append((False, lastpos, self.size, None))
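    # Editorial example (not in the original source): for a member of size 40 with
    # blockinfo [(0, 10), (30, 5)], the map built above becomes
    #   [(True, 0, 10, offset), (False, 10, 30, None),
    #    (True, 30, 35, offset + 10), (False, 35, 40, None)],
    # i.e. data blocks point into the archive while holes are synthesized as NULs.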
def seekable(self):
if not hasattr(self.fileobj, "seekable"):
# XXX gzip.GzipFile and bz2.BZ2File
return True
return self.fileobj.seekable()
def tell(self):
"""Return the current file position.
"""
return self.position
def seek(self, position):
"""Seek to a position in the file.
"""
self.position = position
def read(self, size=None):
"""Read data from the file.
"""
if size is None:
size = self.size - self.position
else:
size = min(size, self.size - self.position)
buf = b""
while size > 0:
while True:
data, start, stop, offset = self.map[self.map_index]
if start <= self.position < stop:
break
else:
self.map_index += 1
if self.map_index == len(self.map):
self.map_index = 0
length = min(size, stop - self.position)
if data:
self.fileobj.seek(offset + (self.position - start))
buf += self.fileobj.read(length)
else:
buf += NUL * length
size -= length
self.position += length
return buf
#class _FileInFile
class ExFileObject(object):
"""File-like object for reading an archive member.
Is returned by TarFile.extractfile().
"""
blocksize = 1024
def __init__(self, tarfile, tarinfo):
self.fileobj = _FileInFile(tarfile.fileobj,
tarinfo.offset_data,
tarinfo.size,
tarinfo.sparse)
self.name = tarinfo.name
self.mode = "r"
self.closed = False
self.size = tarinfo.size
self.position = 0
self.buffer = b""
def readable(self):
return True
def writable(self):
return False
def seekable(self):
return self.fileobj.seekable()
def read(self, size=None):
"""Read at most size bytes from the file. If size is not
present or None, read all data until EOF is reached.
"""
if self.closed:
raise ValueError("I/O operation on closed file")
buf = b""
if self.buffer:
if size is None:
buf = self.buffer
self.buffer = b""
else:
buf = self.buffer[:size]
self.buffer = self.buffer[size:]
if size is None:
buf += self.fileobj.read()
else:
buf += self.fileobj.read(size - len(buf))
self.position += len(buf)
return buf
# XXX TextIOWrapper uses the read1() method.
read1 = read
def readline(self, size=-1):
"""Read one entire line from the file. If size is present
and non-negative, return a string with at most that
size, which may be an incomplete line.
"""
if self.closed:
raise ValueError("I/O operation on closed file")
pos = self.buffer.find(b"\n") + 1
if pos == 0:
# no newline found.
while True:
buf = self.fileobj.read(self.blocksize)
self.buffer += buf
if not buf or b"\n" in buf:
pos = self.buffer.find(b"\n") + 1
if pos == 0:
# no newline found.
pos = len(self.buffer)
break
if size != -1:
pos = min(size, pos)
buf = self.buffer[:pos]
self.buffer = self.buffer[pos:]
self.position += len(buf)
return buf
def readlines(self):
"""Return a list with all remaining lines.
"""
result = []
while True:
line = self.readline()
if not line: break
result.append(line)
return result
def tell(self):
"""Return the current file position.
"""
if self.closed:
raise ValueError("I/O operation on closed file")
return self.position
def seek(self, pos, whence=os.SEEK_SET):
"""Seek to a position in the file.
"""
if self.closed:
raise ValueError("I/O operation on closed file")
if whence == os.SEEK_SET:
self.position = min(max(pos, 0), self.size)
elif whence == os.SEEK_CUR:
if pos < 0:
self.position = max(self.position + pos, 0)
else:
self.position = min(self.position + pos, self.size)
elif whence == os.SEEK_END:
self.position = max(min(self.size + pos, self.size), 0)
else:
raise ValueError("Invalid argument")
self.buffer = b""
self.fileobj.seek(self.position)
def close(self):
"""Close the file object.
"""
self.closed = True
def __iter__(self):
"""Get an iterator over the file's lines.
"""
while True:
line = self.readline()
if not line:
break
yield line
#class ExFileObject
#------------------
# Exported Classes
#------------------
class TarInfo(object):
"""Informational class which holds the details about an
archive member given by a tar header block.
TarInfo objects are returned by TarFile.getmember(),
TarFile.getmembers() and TarFile.gettarinfo() and are
usually created internally.
"""
__slots__ = ("name", "mode", "uid", "gid", "size", "mtime",
"chksum", "type", "linkname", "uname", "gname",
"devmajor", "devminor",
"offset", "offset_data", "pax_headers", "sparse",
"tarfile", "_sparse_structs", "_link_target")
def __init__(self, name=""):
"""Construct a TarInfo object. name is the optional name
of the member.
"""
self.name = name # member name
self.mode = 0o644 # file permissions
self.uid = 0 # user id
self.gid = 0 # group id
self.size = 0 # file size
self.mtime = 0 # modification time
self.chksum = 0 # header checksum
self.type = REGTYPE # member type
self.linkname = "" # link name
self.uname = "" # user name
self.gname = "" # group name
self.devmajor = 0 # device major number
self.devminor = 0 # device minor number
self.offset = 0 # the tar header starts here
self.offset_data = 0 # the file's data starts here
self.sparse = None # sparse member information
self.pax_headers = {} # pax header information
# In pax headers the "name" and "linkname" field are called
# "path" and "linkpath".
def _getpath(self):
return self.name
def _setpath(self, name):
self.name = name
path = property(_getpath, _setpath)
def _getlinkpath(self):
return self.linkname
def _setlinkpath(self, linkname):
self.linkname = linkname
linkpath = property(_getlinkpath, _setlinkpath)
def __repr__(self):
return "<%s %r at %#x>" % (self.__class__.__name__,self.name,id(self))
def get_info(self):
"""Return the TarInfo's attributes as a dictionary.
"""
info = {
"name": self.name,
"mode": self.mode & 0o7777,
"uid": self.uid,
"gid": self.gid,
"size": self.size,
"mtime": self.mtime,
"chksum": self.chksum,
"type": self.type,
"linkname": self.linkname,
"uname": self.uname,
"gname": self.gname,
"devmajor": self.devmajor,
"devminor": self.devminor
}
if info["type"] == DIRTYPE and not info["name"].endswith("/"):
info["name"] += "/"
return info
def tobuf(self, format=DEFAULT_FORMAT, encoding=ENCODING, errors="surrogateescape"):
"""Return a tar header as a string of 512 byte blocks.
"""
info = self.get_info()
if format == USTAR_FORMAT:
return self.create_ustar_header(info, encoding, errors)
elif format == GNU_FORMAT:
return self.create_gnu_header(info, encoding, errors)
elif format == PAX_FORMAT:
return self.create_pax_header(info, encoding)
else:
raise ValueError("invalid format")
def create_ustar_header(self, info, encoding, errors):
"""Return the object as a ustar header block.
"""
info["magic"] = POSIX_MAGIC
if len(info["linkname"]) > LENGTH_LINK:
raise ValueError("linkname is too long")
if len(info["name"]) > LENGTH_NAME:
info["prefix"], info["name"] = self._posix_split_name(info["name"])
return self._create_header(info, USTAR_FORMAT, encoding, errors)
def create_gnu_header(self, info, encoding, errors):
"""Return the object as a GNU header block sequence.
"""
info["magic"] = GNU_MAGIC
buf = b""
if len(info["linkname"]) > LENGTH_LINK:
buf += self._create_gnu_long_header(info["linkname"], GNUTYPE_LONGLINK, encoding, errors)
if len(info["name"]) > LENGTH_NAME:
buf += self._create_gnu_long_header(info["name"], GNUTYPE_LONGNAME, encoding, errors)
return buf + self._create_header(info, GNU_FORMAT, encoding, errors)
def create_pax_header(self, info, encoding):
"""Return the object as a ustar header block. If it cannot be
represented this way, prepend a pax extended header sequence
           with supplemental information.
"""
info["magic"] = POSIX_MAGIC
pax_headers = self.pax_headers.copy()
# Test string fields for values that exceed the field length or cannot
# be represented in ASCII encoding.
for name, hname, length in (
("name", "path", LENGTH_NAME), ("linkname", "linkpath", LENGTH_LINK),
("uname", "uname", 32), ("gname", "gname", 32)):
if hname in pax_headers:
# The pax header has priority.
continue
# Try to encode the string as ASCII.
try:
info[name].encode("ascii", "strict")
except UnicodeEncodeError:
pax_headers[hname] = info[name]
continue
if len(info[name]) > length:
pax_headers[hname] = info[name]
        # Test number fields for values that exceed the field limit or values
        # that have to be stored as floats.
for name, digits in (("uid", 8), ("gid", 8), ("size", 12), ("mtime", 12)):
if name in pax_headers:
# The pax header has priority. Avoid overflow.
info[name] = 0
continue
val = info[name]
if not 0 <= val < 8 ** (digits - 1) or isinstance(val, float):
pax_headers[name] = str(val)
info[name] = 0
# Create a pax extended header if necessary.
if pax_headers:
buf = self._create_pax_generic_header(pax_headers, XHDTYPE, encoding)
else:
buf = b""
return buf + self._create_header(info, USTAR_FORMAT, "ascii", "replace")
@classmethod
def create_pax_global_header(cls, pax_headers):
"""Return the object as a pax global header block sequence.
"""
return cls._create_pax_generic_header(pax_headers, XGLTYPE, "utf8")
def _posix_split_name(self, name):
"""Split a name longer than 100 chars into a prefix
and a name part.
"""
prefix = name[:LENGTH_PREFIX + 1]
while prefix and prefix[-1] != "/":
prefix = prefix[:-1]
name = name[len(prefix):]
prefix = prefix[:-1]
if not prefix or len(name) > LENGTH_NAME:
raise ValueError("name is too long")
return prefix, name
@staticmethod
def _create_header(info, format, encoding, errors):
"""Return a header block. info is a dictionary with file
information, format must be one of the *_FORMAT constants.
"""
parts = [
stn(info.get("name", ""), 100, encoding, errors),
itn(info.get("mode", 0) & 0o7777, 8, format),
itn(info.get("uid", 0), 8, format),
itn(info.get("gid", 0), 8, format),
itn(info.get("size", 0), 12, format),
itn(info.get("mtime", 0), 12, format),
b" ", # checksum field
info.get("type", REGTYPE),
stn(info.get("linkname", ""), 100, encoding, errors),
info.get("magic", POSIX_MAGIC),
stn(info.get("uname", ""), 32, encoding, errors),
stn(info.get("gname", ""), 32, encoding, errors),
itn(info.get("devmajor", 0), 8, format),
itn(info.get("devminor", 0), 8, format),
stn(info.get("prefix", ""), 155, encoding, errors)
]
buf = struct.pack("%ds" % BLOCKSIZE, b"".join(parts))
chksum = calc_chksums(buf[-BLOCKSIZE:])[0]
buf = buf[:-364] + ("%06o\0" % chksum).encode("ascii") + buf[-357:]
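        # The checksum field occupies bytes 148-155 of the header: the slice
        # above keeps bytes 0-147, writes six octal digits plus a NUL at
        # bytes 148-154, and keeps the trailing space placeholder from byte
        # 155 onwards.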
return buf
@staticmethod
def _create_payload(payload):
"""Return the string payload filled with zero bytes
up to the next 512 byte border.
"""
blocks, remainder = divmod(len(payload), BLOCKSIZE)
if remainder > 0:
payload += (BLOCKSIZE - remainder) * NUL
return payload
@classmethod
def _create_gnu_long_header(cls, name, type, encoding, errors):
"""Return a GNUTYPE_LONGNAME or GNUTYPE_LONGLINK sequence
for name.
"""
name = name.encode(encoding, errors) + NUL
info = {}
info["name"] = "././@LongLink"
info["type"] = type
info["size"] = len(name)
info["magic"] = GNU_MAGIC
# create extended header + name blocks.
return cls._create_header(info, USTAR_FORMAT, encoding, errors) + \
cls._create_payload(name)
@classmethod
def _create_pax_generic_header(cls, pax_headers, type, encoding):
"""Return a POSIX.1-2008 extended or global header sequence
that contains a list of keyword, value pairs. The values
must be strings.
"""
# Check if one of the fields contains surrogate characters and thereby
# forces hdrcharset=BINARY, see _proc_pax() for more information.
binary = False
for keyword, value in pax_headers.items():
try:
value.encode("utf8", "strict")
except UnicodeEncodeError:
binary = True
break
records = b""
if binary:
# Put the hdrcharset field at the beginning of the header.
records += b"21 hdrcharset=BINARY\n"
for keyword, value in pax_headers.items():
keyword = keyword.encode("utf8")
if binary:
# Try to restore the original byte representation of `value'.
                # Needless to say, the encoding must match the string.
value = value.encode(encoding, "surrogateescape")
else:
value = value.encode("utf8")
l = len(keyword) + len(value) + 3 # ' ' + '=' + '\n'
n = p = 0
while True:
n = l + len(str(p))
if n == p:
break
p = n
records += bytes(str(p), "ascii") + b" " + keyword + b"=" + value + b"\n"
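            # Worked example: for keyword "path" and value "foo" the base
            # length l is 4 + 3 + 3 = 10; the loop settles on p = 12 because
            # the two digits of "12" are themselves part of the record,
            # giving the record b"12 path=foo\n" (exactly 12 bytes).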
# We use a hardcoded "././@PaxHeader" name like star does
# instead of the one that POSIX recommends.
info = {}
info["name"] = "././@PaxHeader"
info["type"] = type
info["size"] = len(records)
info["magic"] = POSIX_MAGIC
# Create pax header + record blocks.
return cls._create_header(info, USTAR_FORMAT, "ascii", "replace") + \
cls._create_payload(records)
@classmethod
def frombuf(cls, buf, encoding, errors):
"""Construct a TarInfo object from a 512 byte bytes object.
"""
if len(buf) == 0:
raise EmptyHeaderError("empty header")
if len(buf) != BLOCKSIZE:
raise TruncatedHeaderError("truncated header")
if buf.count(NUL) == BLOCKSIZE:
raise EOFHeaderError("end of file header")
chksum = nti(buf[148:156])
if chksum not in calc_chksums(buf):
raise InvalidHeaderError("bad checksum")
obj = cls()
obj.name = nts(buf[0:100], encoding, errors)
obj.mode = nti(buf[100:108])
obj.uid = nti(buf[108:116])
obj.gid = nti(buf[116:124])
obj.size = nti(buf[124:136])
obj.mtime = nti(buf[136:148])
obj.chksum = chksum
obj.type = buf[156:157]
obj.linkname = nts(buf[157:257], encoding, errors)
obj.uname = nts(buf[265:297], encoding, errors)
obj.gname = nts(buf[297:329], encoding, errors)
obj.devmajor = nti(buf[329:337])
obj.devminor = nti(buf[337:345])
prefix = nts(buf[345:500], encoding, errors)
# Old V7 tar format represents a directory as a regular
# file with a trailing slash.
if obj.type == AREGTYPE and obj.name.endswith("/"):
obj.type = DIRTYPE
# The old GNU sparse format occupies some of the unused
# space in the buffer for up to 4 sparse structures.
        # Save them for later processing in _proc_sparse().
if obj.type == GNUTYPE_SPARSE:
pos = 386
structs = []
for i in range(4):
try:
offset = nti(buf[pos:pos + 12])
numbytes = nti(buf[pos + 12:pos + 24])
except ValueError:
break
structs.append((offset, numbytes))
pos += 24
isextended = bool(buf[482])
origsize = nti(buf[483:495])
obj._sparse_structs = (structs, isextended, origsize)
# Remove redundant slashes from directories.
if obj.isdir():
obj.name = obj.name.rstrip("/")
# Reconstruct a ustar longname.
if prefix and obj.type not in GNU_TYPES:
obj.name = prefix + "/" + obj.name
return obj
@classmethod
def fromtarfile(cls, tarfile):
"""Return the next TarInfo object from TarFile object
tarfile.
"""
buf = tarfile.fileobj.read(BLOCKSIZE)
obj = cls.frombuf(buf, tarfile.encoding, tarfile.errors)
obj.offset = tarfile.fileobj.tell() - BLOCKSIZE
return obj._proc_member(tarfile)
#--------------------------------------------------------------------------
# The following are methods that are called depending on the type of a
# member. The entry point is _proc_member() which can be overridden in a
# subclass to add custom _proc_*() methods. A _proc_*() method MUST
# implement the following
# operations:
# 1. Set self.offset_data to the position where the data blocks begin,
# if there is data that follows.
# 2. Set tarfile.offset to the position where the next member's header will
# begin.
# 3. Return self or another valid TarInfo object.
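    # Illustrative sketch (not part of the original code): a subclass could
    # route a custom header type to its own processing method, e.g.
    #
    #     class MyTarInfo(TarInfo):
    #         def _proc_member(self, tarfile):
    #             if self.type == b"X":          # hypothetical custom type
    #                 return self._proc_custom(tarfile)
    #             return TarInfo._proc_member(self, tarfile)
    #
    # where _proc_custom() would follow the three rules listed above.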
def _proc_member(self, tarfile):
"""Choose the right processing method depending on
the type and call it.
"""
if self.type in (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK):
return self._proc_gnulong(tarfile)
elif self.type == GNUTYPE_SPARSE:
return self._proc_sparse(tarfile)
elif self.type in (XHDTYPE, XGLTYPE, SOLARIS_XHDTYPE):
return self._proc_pax(tarfile)
else:
return self._proc_builtin(tarfile)
def _proc_builtin(self, tarfile):
"""Process a builtin type or an unknown type which
will be treated as a regular file.
"""
self.offset_data = tarfile.fileobj.tell()
offset = self.offset_data
if self.isreg() or self.type not in SUPPORTED_TYPES:
# Skip the following data blocks.
offset += self._block(self.size)
tarfile.offset = offset
# Patch the TarInfo object with saved global
# header information.
self._apply_pax_info(tarfile.pax_headers, tarfile.encoding, tarfile.errors)
return self
def _proc_gnulong(self, tarfile):
"""Process the blocks that hold a GNU longname
or longlink member.
"""
buf = tarfile.fileobj.read(self._block(self.size))
# Fetch the next header and process it.
try:
next = self.fromtarfile(tarfile)
except HeaderError:
raise SubsequentHeaderError("missing or bad subsequent header")
# Patch the TarInfo object from the next header with
# the longname information.
next.offset = self.offset
if self.type == GNUTYPE_LONGNAME:
next.name = nts(buf, tarfile.encoding, tarfile.errors)
elif self.type == GNUTYPE_LONGLINK:
next.linkname = nts(buf, tarfile.encoding, tarfile.errors)
return next
def _proc_sparse(self, tarfile):
"""Process a GNU sparse header plus extra headers.
"""
# We already collected some sparse structures in frombuf().
structs, isextended, origsize = self._sparse_structs
del self._sparse_structs
# Collect sparse structures from extended header blocks.
while isextended:
buf = tarfile.fileobj.read(BLOCKSIZE)
pos = 0
for i in range(21):
try:
offset = nti(buf[pos:pos + 12])
numbytes = nti(buf[pos + 12:pos + 24])
except ValueError:
break
if offset and numbytes:
structs.append((offset, numbytes))
pos += 24
isextended = bool(buf[504])
self.sparse = structs
self.offset_data = tarfile.fileobj.tell()
tarfile.offset = self.offset_data + self._block(self.size)
self.size = origsize
return self
def _proc_pax(self, tarfile):
"""Process an extended or global header as described in
POSIX.1-2008.
"""
# Read the header information.
buf = tarfile.fileobj.read(self._block(self.size))
# A pax header stores supplemental information for either
# the following file (extended) or all following files
# (global).
if self.type == XGLTYPE:
pax_headers = tarfile.pax_headers
else:
pax_headers = tarfile.pax_headers.copy()
# Check if the pax header contains a hdrcharset field. This tells us
# the encoding of the path, linkpath, uname and gname fields. Normally,
        # these fields are UTF-8 encoded, but POSIX.1-2008 allows tar
        # implementations to store them as raw binary strings if the
        # translation to UTF-8 fails.
match = re.search(br"\d+ hdrcharset=([^\n]+)\n", buf)
if match is not None:
pax_headers["hdrcharset"] = match.group(1).decode("utf8")
# For the time being, we don't care about anything other than "BINARY".
# The only other value that is currently allowed by the standard is
# "ISO-IR 10646 2000 UTF-8" in other words UTF-8.
hdrcharset = pax_headers.get("hdrcharset")
if hdrcharset == "BINARY":
encoding = tarfile.encoding
else:
encoding = "utf8"
# Parse pax header information. A record looks like that:
# "%d %s=%s\n" % (length, keyword, value). length is the size
# of the complete record including the length field itself and
# the newline. keyword and value are both UTF-8 encoded strings.
regex = re.compile(br"(\d+) ([^=]+)=")
pos = 0
while True:
match = regex.match(buf, pos)
if not match:
break
length, keyword = match.groups()
length = int(length)
value = buf[match.end(2) + 1:match.start(1) + length - 1]
# Normally, we could just use "utf8" as the encoding and "strict"
# as the error handler, but we better not take the risk. For
# example, GNU tar <= 1.23 is known to store filenames it cannot
# translate to UTF-8 as raw strings (unfortunately without a
# hdrcharset=BINARY header).
# We first try the strict standard encoding, and if that fails we
# fall back on the user's encoding and error handler.
keyword = self._decode_pax_field(keyword, "utf8", "utf8",
tarfile.errors)
if keyword in PAX_NAME_FIELDS:
value = self._decode_pax_field(value, encoding, tarfile.encoding,
tarfile.errors)
else:
value = self._decode_pax_field(value, "utf8", "utf8",
tarfile.errors)
pax_headers[keyword] = value
pos += length
# Fetch the next header.
try:
next = self.fromtarfile(tarfile)
except HeaderError:
raise SubsequentHeaderError("missing or bad subsequent header")
# Process GNU sparse information.
if "GNU.sparse.map" in pax_headers:
# GNU extended sparse format version 0.1.
self._proc_gnusparse_01(next, pax_headers)
elif "GNU.sparse.size" in pax_headers:
# GNU extended sparse format version 0.0.
self._proc_gnusparse_00(next, pax_headers, buf)
elif pax_headers.get("GNU.sparse.major") == "1" and pax_headers.get("GNU.sparse.minor") == "0":
# GNU extended sparse format version 1.0.
self._proc_gnusparse_10(next, pax_headers, tarfile)
if self.type in (XHDTYPE, SOLARIS_XHDTYPE):
# Patch the TarInfo object with the extended header info.
next._apply_pax_info(pax_headers, tarfile.encoding, tarfile.errors)
next.offset = self.offset
if "size" in pax_headers:
# If the extended header replaces the size field,
# we need to recalculate the offset where the next
# header starts.
offset = next.offset_data
if next.isreg() or next.type not in SUPPORTED_TYPES:
offset += next._block(next.size)
tarfile.offset = offset
return next
def _proc_gnusparse_00(self, next, pax_headers, buf):
"""Process a GNU tar extended sparse header, version 0.0.
"""
offsets = []
for match in re.finditer(br"\d+ GNU.sparse.offset=(\d+)\n", buf):
offsets.append(int(match.group(1)))
numbytes = []
for match in re.finditer(br"\d+ GNU.sparse.numbytes=(\d+)\n", buf):
numbytes.append(int(match.group(1)))
next.sparse = list(zip(offsets, numbytes))
def _proc_gnusparse_01(self, next, pax_headers):
"""Process a GNU tar extended sparse header, version 0.1.
"""
sparse = [int(x) for x in pax_headers["GNU.sparse.map"].split(",")]
next.sparse = list(zip(sparse[::2], sparse[1::2]))
def _proc_gnusparse_10(self, next, pax_headers, tarfile):
"""Process a GNU tar extended sparse header, version 1.0.
"""
fields = None
sparse = []
buf = tarfile.fileobj.read(BLOCKSIZE)
fields, buf = buf.split(b"\n", 1)
fields = int(fields)
while len(sparse) < fields * 2:
if b"\n" not in buf:
buf += tarfile.fileobj.read(BLOCKSIZE)
number, buf = buf.split(b"\n", 1)
sparse.append(int(number))
next.offset_data = tarfile.fileobj.tell()
next.sparse = list(zip(sparse[::2], sparse[1::2]))
def _apply_pax_info(self, pax_headers, encoding, errors):
"""Replace fields with supplemental information from a previous
pax extended or global header.
"""
for keyword, value in pax_headers.items():
if keyword == "GNU.sparse.name":
setattr(self, "path", value)
elif keyword == "GNU.sparse.size":
setattr(self, "size", int(value))
elif keyword == "GNU.sparse.realsize":
setattr(self, "size", int(value))
elif keyword in PAX_FIELDS:
if keyword in PAX_NUMBER_FIELDS:
try:
value = PAX_NUMBER_FIELDS[keyword](value)
except ValueError:
value = 0
if keyword == "path":
value = value.rstrip("/")
setattr(self, keyword, value)
self.pax_headers = pax_headers.copy()
def _decode_pax_field(self, value, encoding, fallback_encoding, fallback_errors):
"""Decode a single field from a pax record.
"""
try:
return value.decode(encoding, "strict")
except UnicodeDecodeError:
return value.decode(fallback_encoding, fallback_errors)
def _block(self, count):
"""Round up a byte count by BLOCKSIZE and return it,
e.g. _block(834) => 1024.
"""
blocks, remainder = divmod(count, BLOCKSIZE)
if remainder:
blocks += 1
return blocks * BLOCKSIZE
def isreg(self):
return self.type in REGULAR_TYPES
def isfile(self):
return self.isreg()
def isdir(self):
return self.type == DIRTYPE
def issym(self):
return self.type == SYMTYPE
def islnk(self):
return self.type == LNKTYPE
def ischr(self):
return self.type == CHRTYPE
def isblk(self):
return self.type == BLKTYPE
def isfifo(self):
return self.type == FIFOTYPE
def issparse(self):
return self.sparse is not None
def isdev(self):
return self.type in (CHRTYPE, BLKTYPE, FIFOTYPE)
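    # Illustrative usage sketch (assumes an open TarFile `tar`): the query
    # methods above are typically used when walking an archive, e.g.
    #
    #     for member in tar.getmembers():
    #         if member.isfile() or member.islnk():
    #             process(member)              # hypothetical callback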
# class TarInfo
class TarFile(object):
"""The TarFile Class provides an interface to tar archives.
"""
debug = 0 # May be set from 0 (no msgs) to 3 (all msgs)
dereference = False # If true, add content of linked file to the
# tar file, else the link.
ignore_zeros = False # If true, skips empty or invalid blocks and
# continues processing.
errorlevel = 1 # If 0, fatal errors only appear in debug
# messages (if debug >= 0). If > 0, errors
# are passed to the caller as exceptions.
format = DEFAULT_FORMAT # The format to use when creating an archive.
encoding = ENCODING # Encoding for 8-bit character strings.
errors = None # Error handler for unicode conversion.
tarinfo = TarInfo # The default TarInfo class to use.
fileobject = ExFileObject # The default ExFileObject class to use.
def __init__(self, name=None, mode="r", fileobj=None, format=None,
tarinfo=None, dereference=None, ignore_zeros=None, encoding=None,
errors="surrogateescape", pax_headers=None, debug=None, errorlevel=None):
"""Open an (uncompressed) tar archive `name'. `mode' is either 'r' to
read from an existing archive, 'a' to append data to an existing
file or 'w' to create a new file overwriting an existing one. `mode'
defaults to 'r'.
If `fileobj' is given, it is used for reading or writing data. If it
can be determined, `mode' is overridden by `fileobj's mode.
           `fileobj' is not closed when the TarFile is closed.
"""
if len(mode) > 1 or mode not in "raw":
raise ValueError("mode must be 'r', 'a' or 'w'")
self.mode = mode
self._mode = {"r": "rb", "a": "r+b", "w": "wb"}[mode]
if not fileobj:
if self.mode == "a" and not os.path.exists(name):
# Create nonexistent files in append mode.
self.mode = "w"
self._mode = "wb"
fileobj = bltn_open(name, self._mode)
self._extfileobj = False
else:
if name is None and hasattr(fileobj, "name"):
name = fileobj.name
if hasattr(fileobj, "mode"):
self._mode = fileobj.mode
self._extfileobj = True
self.name = os.path.abspath(name) if name else None
self.fileobj = fileobj
# Init attributes.
if format is not None:
self.format = format
if tarinfo is not None:
self.tarinfo = tarinfo
if dereference is not None:
self.dereference = dereference
if ignore_zeros is not None:
self.ignore_zeros = ignore_zeros
if encoding is not None:
self.encoding = encoding
self.errors = errors
if pax_headers is not None and self.format == PAX_FORMAT:
self.pax_headers = pax_headers
else:
self.pax_headers = {}
if debug is not None:
self.debug = debug
if errorlevel is not None:
self.errorlevel = errorlevel
# Init datastructures.
self.closed = False
self.members = [] # list of members as TarInfo objects
self._loaded = False # flag if all members have been read
self.offset = self.fileobj.tell()
# current position in the archive file
self.inodes = {} # dictionary caching the inodes of
# archive members already added
try:
if self.mode == "r":
self.firstmember = None
self.firstmember = self.next()
if self.mode == "a":
# Move to the end of the archive,
# before the first empty block.
while True:
self.fileobj.seek(self.offset)
try:
tarinfo = self.tarinfo.fromtarfile(self)
self.members.append(tarinfo)
except EOFHeaderError:
self.fileobj.seek(self.offset)
break
except HeaderError as e:
raise ReadError(str(e))
if self.mode in "aw":
self._loaded = True
if self.pax_headers:
buf = self.tarinfo.create_pax_global_header(self.pax_headers.copy())
self.fileobj.write(buf)
self.offset += len(buf)
except:
if not self._extfileobj:
self.fileobj.close()
self.closed = True
raise
#--------------------------------------------------------------------------
# Below are the classmethods which act as alternate constructors to the
# TarFile class. The open() method is the only one that is needed for
# public use; it is the "super"-constructor and is able to select an
# adequate "sub"-constructor for a particular compression using the mapping
# from OPEN_METH.
#
# This concept allows one to subclass TarFile without losing the comfort of
# the super-constructor. A sub-constructor is registered and made available
# by adding it to the mapping in OPEN_METH.
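    # Illustrative sketch (not part of the original code): a subclass could
    # register an additional sub-constructor, e.g.
    #
    #     class MyTarFile(TarFile):
    #         @classmethod
    #         def xzopen(cls, name, mode="r", fileobj=None, **kwargs):
    #             ...                          # hypothetical opener
    #         OPEN_METH = dict(TarFile.OPEN_METH, xz="xzopen")
    #
    # after which mode strings such as "r:xz" would be dispatched to it.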
@classmethod
def open(cls, name=None, mode="r", fileobj=None, bufsize=RECORDSIZE, **kwargs):
"""Open a tar archive for reading, writing or appending. Return
an appropriate TarFile class.
mode:
'r' or 'r:*' open for reading with transparent compression
'r:' open for reading exclusively uncompressed
'r:gz' open for reading with gzip compression
'r:bz2' open for reading with bzip2 compression
'a' or 'a:' open for appending, creating the file if necessary
'w' or 'w:' open for writing without compression
'w:gz' open for writing with gzip compression
'w:bz2' open for writing with bzip2 compression
'r|*' open a stream of tar blocks with transparent compression
'r|' open an uncompressed stream of tar blocks for reading
'r|gz' open a gzip compressed stream of tar blocks
'r|bz2' open a bzip2 compressed stream of tar blocks
'w|' open an uncompressed stream for writing
'w|gz' open a gzip compressed stream for writing
'w|bz2' open a bzip2 compressed stream for writing
"""
if not name and not fileobj:
raise ValueError("nothing to open")
if mode in ("r", "r:*"):
# Find out which *open() is appropriate for opening the file.
for comptype in cls.OPEN_METH:
func = getattr(cls, cls.OPEN_METH[comptype])
if fileobj is not None:
saved_pos = fileobj.tell()
try:
return func(name, "r", fileobj, **kwargs)
except (ReadError, CompressionError) as e:
if fileobj is not None:
fileobj.seek(saved_pos)
continue
raise ReadError("file could not be opened successfully")
elif ":" in mode:
filemode, comptype = mode.split(":", 1)
filemode = filemode or "r"
comptype = comptype or "tar"
# Select the *open() function according to
# given compression.
if comptype in cls.OPEN_METH:
func = getattr(cls, cls.OPEN_METH[comptype])
else:
raise CompressionError("unknown compression type %r" % comptype)
return func(name, filemode, fileobj, **kwargs)
elif "|" in mode:
filemode, comptype = mode.split("|", 1)
filemode = filemode or "r"
comptype = comptype or "tar"
if filemode not in "rw":
raise ValueError("mode must be 'r' or 'w'")
stream = _Stream(name, filemode, comptype, fileobj, bufsize)
try:
t = cls(name, filemode, stream, **kwargs)
except:
stream.close()
raise
t._extfileobj = False
return t
elif mode in "aw":
return cls.taropen(name, mode, fileobj, **kwargs)
raise ValueError("undiscernible mode")
@classmethod
def taropen(cls, name, mode="r", fileobj=None, **kwargs):
"""Open uncompressed tar archive name for reading or writing.
"""
if len(mode) > 1 or mode not in "raw":
raise ValueError("mode must be 'r', 'a' or 'w'")
return cls(name, mode, fileobj, **kwargs)
@classmethod
def gzopen(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs):
"""Open gzip compressed tar archive name for reading or writing.
Appending is not allowed.
"""
if len(mode) > 1 or mode not in "rw":
raise ValueError("mode must be 'r' or 'w'")
try:
import gzip
gzip.GzipFile
except (ImportError, AttributeError):
raise CompressionError("gzip module is not available")
extfileobj = fileobj is not None
try:
fileobj = gzip.GzipFile(name, mode + "b", compresslevel, fileobj)
t = cls.taropen(name, mode, fileobj, **kwargs)
except IOError:
if not extfileobj and fileobj is not None:
fileobj.close()
if fileobj is None:
raise
raise ReadError("not a gzip file")
except:
if not extfileobj and fileobj is not None:
fileobj.close()
raise
t._extfileobj = extfileobj
return t
@classmethod
def bz2open(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs):
"""Open bzip2 compressed tar archive name for reading or writing.
Appending is not allowed.
"""
if len(mode) > 1 or mode not in "rw":
raise ValueError("mode must be 'r' or 'w'.")
try:
import bz2
except ImportError:
raise CompressionError("bz2 module is not available")
if fileobj is not None:
fileobj = _BZ2Proxy(fileobj, mode)
else:
fileobj = bz2.BZ2File(name, mode, compresslevel=compresslevel)
try:
t = cls.taropen(name, mode, fileobj, **kwargs)
except (IOError, EOFError):
fileobj.close()
raise ReadError("not a bzip2 file")
t._extfileobj = False
return t
# All *open() methods are registered here.
OPEN_METH = {
"tar": "taropen", # uncompressed tar
"gz": "gzopen", # gzip compressed tar
"bz2": "bz2open" # bzip2 compressed tar
}
#--------------------------------------------------------------------------
# The public methods which TarFile provides:
def close(self):
"""Close the TarFile. In write-mode, two finishing zero blocks are
appended to the archive.
"""
if self.closed:
return
if self.mode in "aw":
self.fileobj.write(NUL * (BLOCKSIZE * 2))
self.offset += (BLOCKSIZE * 2)
# fill up the end with zero-blocks
# (like option -b20 for tar does)
blocks, remainder = divmod(self.offset, RECORDSIZE)
if remainder > 0:
self.fileobj.write(NUL * (RECORDSIZE - remainder))
if not self._extfileobj:
self.fileobj.close()
self.closed = True
def getmember(self, name):
"""Return a TarInfo object for member `name'. If `name' can not be
found in the archive, KeyError is raised. If a member occurs more
than once in the archive, its last occurrence is assumed to be the
most up-to-date version.
"""
tarinfo = self._getmember(name)
if tarinfo is None:
raise KeyError("filename %r not found" % name)
return tarinfo
def getmembers(self):
"""Return the members of the archive as a list of TarInfo objects. The
list has the same order as the members in the archive.
"""
self._check()
if not self._loaded: # if we want to obtain a list of
self._load() # all members, we first have to
# scan the whole archive.
return self.members
def getnames(self):
"""Return the members of the archive as a list of their names. It has
the same order as the list returned by getmembers().
"""
return [tarinfo.name for tarinfo in self.getmembers()]
def gettarinfo(self, name=None, arcname=None, fileobj=None):
"""Create a TarInfo object for either the file `name' or the file
object `fileobj' (using os.fstat on its file descriptor). You can
modify some of the TarInfo's attributes before you add it using
addfile(). If given, `arcname' specifies an alternative name for the
file in the archive.
"""
self._check("aw")
# When fileobj is given, replace name by
# fileobj's real name.
if fileobj is not None:
name = fileobj.name
# Building the name of the member in the archive.
# Backward slashes are converted to forward slashes,
# Absolute paths are turned to relative paths.
if arcname is None:
arcname = name
drv, arcname = os.path.splitdrive(arcname)
arcname = arcname.replace(os.sep, "/")
arcname = arcname.lstrip("/")
# Now, fill the TarInfo object with
# information specific for the file.
tarinfo = self.tarinfo()
tarinfo.tarfile = self
# Use os.stat or os.lstat, depending on platform
# and if symlinks shall be resolved.
if fileobj is None:
if hasattr(os, "lstat") and not self.dereference:
statres = os.lstat(name)
else:
statres = os.stat(name)
else:
statres = os.fstat(fileobj.fileno())
linkname = ""
stmd = statres.st_mode
if stat.S_ISREG(stmd):
inode = (statres.st_ino, statres.st_dev)
if not self.dereference and statres.st_nlink > 1 and \
inode in self.inodes and arcname != self.inodes[inode]:
# Is it a hardlink to an already
# archived file?
type = LNKTYPE
linkname = self.inodes[inode]
else:
                # The inode is added only if it's valid.
# For win32 it is always 0.
type = REGTYPE
if inode[0]:
self.inodes[inode] = arcname
elif stat.S_ISDIR(stmd):
type = DIRTYPE
elif stat.S_ISFIFO(stmd):
type = FIFOTYPE
elif stat.S_ISLNK(stmd):
type = SYMTYPE
linkname = os.readlink(name)
elif stat.S_ISCHR(stmd):
type = CHRTYPE
elif stat.S_ISBLK(stmd):
type = BLKTYPE
else:
return None
# Fill the TarInfo object with all
# information we can get.
tarinfo.name = arcname
tarinfo.mode = stmd
tarinfo.uid = statres.st_uid
tarinfo.gid = statres.st_gid
if type == REGTYPE:
tarinfo.size = statres.st_size
else:
tarinfo.size = 0
tarinfo.mtime = statres.st_mtime
tarinfo.type = type
tarinfo.linkname = linkname
if pwd:
try:
tarinfo.uname = pwd.getpwuid(tarinfo.uid)[0]
except KeyError:
pass
if grp:
try:
tarinfo.gname = grp.getgrgid(tarinfo.gid)[0]
except KeyError:
pass
if type in (CHRTYPE, BLKTYPE):
if hasattr(os, "major") and hasattr(os, "minor"):
tarinfo.devmajor = os.major(statres.st_rdev)
tarinfo.devminor = os.minor(statres.st_rdev)
return tarinfo
def list(self, verbose=True):
"""Print a table of contents to sys.stdout. If `verbose' is False, only
the names of the members are printed. If it is True, an `ls -l'-like
output is produced.
"""
self._check()
for tarinfo in self:
if verbose:
print(filemode(tarinfo.mode), end=' ')
print("%s/%s" % (tarinfo.uname or tarinfo.uid,
tarinfo.gname or tarinfo.gid), end=' ')
if tarinfo.ischr() or tarinfo.isblk():
print("%10s" % ("%d,%d" \
% (tarinfo.devmajor, tarinfo.devminor)), end=' ')
else:
print("%10d" % tarinfo.size, end=' ')
print("%d-%02d-%02d %02d:%02d:%02d" \
% time.localtime(tarinfo.mtime)[:6], end=' ')
print(tarinfo.name + ("/" if tarinfo.isdir() else ""), end=' ')
if verbose:
if tarinfo.issym():
print("->", tarinfo.linkname, end=' ')
if tarinfo.islnk():
print("link to", tarinfo.linkname, end=' ')
print()
def add(self, name, arcname=None, recursive=True, exclude=None, filter=None):
"""Add the file `name' to the archive. `name' may be any type of file
(directory, fifo, symbolic link, etc.). If given, `arcname'
specifies an alternative name for the file in the archive.
Directories are added recursively by default. This can be avoided by
setting `recursive' to False. `exclude' is a function that should
return True for each filename to be excluded. `filter' is a function
that expects a TarInfo object argument and returns the changed
TarInfo object, if it returns None the TarInfo object will be
excluded from the archive.
"""
self._check("aw")
if arcname is None:
arcname = name
# Exclude pathnames.
if exclude is not None:
import warnings
warnings.warn("use the filter argument instead",
DeprecationWarning, 2)
if exclude(name):
self._dbg(2, "tarfile: Excluded %r" % name)
return
# Skip if somebody tries to archive the archive...
if self.name is not None and os.path.abspath(name) == self.name:
self._dbg(2, "tarfile: Skipped %r" % name)
return
self._dbg(1, name)
# Create a TarInfo object from the file.
tarinfo = self.gettarinfo(name, arcname)
if tarinfo is None:
self._dbg(1, "tarfile: Unsupported type %r" % name)
return
# Change or exclude the TarInfo object.
if filter is not None:
tarinfo = filter(tarinfo)
if tarinfo is None:
self._dbg(2, "tarfile: Excluded %r" % name)
return
# Append the tar header and data to the archive.
if tarinfo.isreg():
f = bltn_open(name, "rb")
self.addfile(tarinfo, f)
f.close()
elif tarinfo.isdir():
self.addfile(tarinfo)
if recursive:
for f in os.listdir(name):
self.add(os.path.join(name, f), os.path.join(arcname, f),
recursive, exclude, filter=filter)
else:
self.addfile(tarinfo)
def addfile(self, tarinfo, fileobj=None):
"""Add the TarInfo object `tarinfo' to the archive. If `fileobj' is
given, tarinfo.size bytes are read from it and added to the archive.
You can create TarInfo objects using gettarinfo().
On Windows platforms, `fileobj' should always be opened with mode
'rb' to avoid irritation about the file size.
"""
self._check("aw")
tarinfo = copy.copy(tarinfo)
buf = tarinfo.tobuf(self.format, self.encoding, self.errors)
self.fileobj.write(buf)
self.offset += len(buf)
# If there's data to follow, append it.
if fileobj is not None:
copyfileobj(fileobj, self.fileobj, tarinfo.size)
blocks, remainder = divmod(tarinfo.size, BLOCKSIZE)
if remainder > 0:
self.fileobj.write(NUL * (BLOCKSIZE - remainder))
blocks += 1
self.offset += blocks * BLOCKSIZE
self.members.append(tarinfo)
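    # Illustrative usage sketch (assumes a TarFile `tar` opened for writing;
    # the file name is hypothetical):
    #
    #     info = tar.gettarinfo("data.bin", arcname="payload/data.bin")
    #     info.uname = "root"                  # tweak attributes before adding
    #     with bltn_open("data.bin", "rb") as f:
    #         tar.addfile(info, f)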
def extractall(self, path=".", members=None):
"""Extract all members from the archive to the current working
directory and set owner, modification time and permissions on
directories afterwards. `path' specifies a different directory
to extract to. `members' is optional and must be a subset of the
list returned by getmembers().
"""
directories = []
if members is None:
members = self
for tarinfo in members:
if tarinfo.isdir():
# Extract directories with a safe mode.
directories.append(tarinfo)
tarinfo = copy.copy(tarinfo)
tarinfo.mode = 0o700
# Do not set_attrs directories, as we will do that further down
self.extract(tarinfo, path, set_attrs=not tarinfo.isdir())
# Reverse sort directories.
directories.sort(key=lambda a: a.name)
directories.reverse()
# Set correct owner, mtime and filemode on directories.
for tarinfo in directories:
dirpath = os.path.join(path, tarinfo.name)
try:
self.chown(tarinfo, dirpath)
self.utime(tarinfo, dirpath)
self.chmod(tarinfo, dirpath)
except ExtractError as e:
if self.errorlevel > 1:
raise
else:
self._dbg(1, "tarfile: %s" % e)
def extract(self, member, path="", set_attrs=True):
"""Extract a member from the archive to the current working directory,
using its full name. Its file information is extracted as accurately
as possible. `member' may be a filename or a TarInfo object. You can
specify a different directory using `path'. File attributes (owner,
mtime, mode) are set unless `set_attrs' is False.
"""
self._check("r")
if isinstance(member, str):
tarinfo = self.getmember(member)
else:
tarinfo = member
# Prepare the link target for makelink().
if tarinfo.islnk():
tarinfo._link_target = os.path.join(path, tarinfo.linkname)
try:
self._extract_member(tarinfo, os.path.join(path, tarinfo.name),
set_attrs=set_attrs)
except EnvironmentError as e:
if self.errorlevel > 0:
raise
else:
if e.filename is None:
self._dbg(1, "tarfile: %s" % e.strerror)
else:
self._dbg(1, "tarfile: %s %r" % (e.strerror, e.filename))
except ExtractError as e:
if self.errorlevel > 1:
raise
else:
self._dbg(1, "tarfile: %s" % e)
def extractfile(self, member):
"""Extract a member from the archive as a file object. `member' may be
a filename or a TarInfo object. If `member' is a regular file, a
file-like object is returned. If `member' is a link, a file-like
object is constructed from the link's target. If `member' is none of
the above, None is returned.
The file-like object is read-only and provides the following
methods: read(), readline(), readlines(), seek() and tell()
"""
self._check("r")
if isinstance(member, str):
tarinfo = self.getmember(member)
else:
tarinfo = member
if tarinfo.isreg():
return self.fileobject(self, tarinfo)
elif tarinfo.type not in SUPPORTED_TYPES:
# If a member's type is unknown, it is treated as a
# regular file.
return self.fileobject(self, tarinfo)
elif tarinfo.islnk() or tarinfo.issym():
if isinstance(self.fileobj, _Stream):
# A small but ugly workaround for the case that someone tries
# to extract a (sym)link as a file-object from a non-seekable
# stream of tar blocks.
raise StreamError("cannot extract (sym)link as file object")
else:
# A (sym)link's file object is its target's file object.
return self.extractfile(self._find_link_target(tarinfo))
else:
# If there's no data associated with the member (directory, chrdev,
# blkdev, etc.), return None instead of a file object.
return None
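    # Illustrative usage sketch (assumes an open TarFile `tar`; the member
    # name is hypothetical):
    #
    #     f = tar.extractfile("payload/data.bin")
    #     if f is not None:                    # None for directories, devices, ...
    #         data = f.read()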
def _extract_member(self, tarinfo, targetpath, set_attrs=True):
"""Extract the TarInfo object tarinfo to a physical
file called targetpath.
"""
# Fetch the TarInfo object for the given name
# and build the destination pathname, replacing
# forward slashes to platform specific separators.
targetpath = targetpath.rstrip("/")
targetpath = targetpath.replace("/", os.sep)
# Create all upper directories.
upperdirs = os.path.dirname(targetpath)
if upperdirs and not os.path.exists(upperdirs):
# Create directories that are not part of the archive with
# default permissions.
os.makedirs(upperdirs)
if tarinfo.islnk() or tarinfo.issym():
self._dbg(1, "%s -> %s" % (tarinfo.name, tarinfo.linkname))
else:
self._dbg(1, tarinfo.name)
if tarinfo.isreg():
self.makefile(tarinfo, targetpath)
elif tarinfo.isdir():
self.makedir(tarinfo, targetpath)
elif tarinfo.isfifo():
self.makefifo(tarinfo, targetpath)
elif tarinfo.ischr() or tarinfo.isblk():
self.makedev(tarinfo, targetpath)
elif tarinfo.islnk() or tarinfo.issym():
self.makelink(tarinfo, targetpath)
elif tarinfo.type not in SUPPORTED_TYPES:
self.makeunknown(tarinfo, targetpath)
else:
self.makefile(tarinfo, targetpath)
if set_attrs:
self.chown(tarinfo, targetpath)
if not tarinfo.issym():
self.chmod(tarinfo, targetpath)
self.utime(tarinfo, targetpath)
#--------------------------------------------------------------------------
# Below are the different file methods. They are called via
# _extract_member() when extract() is called. They can be replaced in a
# subclass to implement other functionality.
def makedir(self, tarinfo, targetpath):
"""Make a directory called targetpath.
"""
try:
# Use a safe mode for the directory, the real mode is set
# later in _extract_member().
os.mkdir(targetpath, 0o700)
except EnvironmentError as e:
if e.errno != errno.EEXIST:
raise
def makefile(self, tarinfo, targetpath):
"""Make a file called targetpath.
"""
source = self.fileobj
source.seek(tarinfo.offset_data)
target = bltn_open(targetpath, "wb")
if tarinfo.sparse is not None:
for offset, size in tarinfo.sparse:
target.seek(offset)
copyfileobj(source, target, size)
else:
copyfileobj(source, target, tarinfo.size)
target.seek(tarinfo.size)
target.truncate()
target.close()
def makeunknown(self, tarinfo, targetpath):
"""Make a file from a TarInfo object with an unknown type
at targetpath.
"""
self.makefile(tarinfo, targetpath)
self._dbg(1, "tarfile: Unknown file type %r, " \
"extracted as regular file." % tarinfo.type)
def makefifo(self, tarinfo, targetpath):
"""Make a fifo called targetpath.
"""
if hasattr(os, "mkfifo"):
os.mkfifo(targetpath)
else:
raise ExtractError("fifo not supported by system")
def makedev(self, tarinfo, targetpath):
"""Make a character or block device called targetpath.
"""
if not hasattr(os, "mknod") or not hasattr(os, "makedev"):
raise ExtractError("special devices not supported by system")
mode = tarinfo.mode
if tarinfo.isblk():
mode |= stat.S_IFBLK
else:
mode |= stat.S_IFCHR
os.mknod(targetpath, mode,
os.makedev(tarinfo.devmajor, tarinfo.devminor))
def makelink(self, tarinfo, targetpath):
"""Make a (symbolic) link called targetpath. If it cannot be created
(platform limitation), we try to make a copy of the referenced file
instead of a link.
"""
try:
# For systems that support symbolic and hard links.
if tarinfo.issym():
os.symlink(tarinfo.linkname, targetpath)
else:
# See extract().
if os.path.exists(tarinfo._link_target):
os.link(tarinfo._link_target, targetpath)
else:
self._extract_member(self._find_link_target(tarinfo),
targetpath)
except symlink_exception:
if tarinfo.issym():
linkpath = os.path.join(os.path.dirname(tarinfo.name),
tarinfo.linkname)
else:
linkpath = tarinfo.linkname
else:
try:
self._extract_member(self._find_link_target(tarinfo),
targetpath)
except KeyError:
raise ExtractError("unable to resolve link inside archive")
def chown(self, tarinfo, targetpath):
"""Set owner of targetpath according to tarinfo.
"""
if pwd and hasattr(os, "geteuid") and os.geteuid() == 0:
# We have to be root to do so.
try:
g = grp.getgrnam(tarinfo.gname)[2]
except KeyError:
g = tarinfo.gid
try:
u = pwd.getpwnam(tarinfo.uname)[2]
except KeyError:
u = tarinfo.uid
try:
if tarinfo.issym() and hasattr(os, "lchown"):
os.lchown(targetpath, u, g)
else:
if sys.platform != "os2emx":
os.chown(targetpath, u, g)
except EnvironmentError as e:
raise ExtractError("could not change owner")
def chmod(self, tarinfo, targetpath):
"""Set file permissions of targetpath according to tarinfo.
"""
if hasattr(os, 'chmod'):
try:
os.chmod(targetpath, tarinfo.mode)
except EnvironmentError as e:
raise ExtractError("could not change mode")
def utime(self, tarinfo, targetpath):
"""Set modification time of targetpath according to tarinfo.
"""
if not hasattr(os, 'utime'):
return
try:
os.utime(targetpath, (tarinfo.mtime, tarinfo.mtime))
except EnvironmentError as e:
raise ExtractError("could not change modification time")
#--------------------------------------------------------------------------
def next(self):
"""Return the next member of the archive as a TarInfo object, when
           TarFile is opened for reading. Return None if there are no more
           members available.
"""
self._check("ra")
if self.firstmember is not None:
m = self.firstmember
self.firstmember = None
return m
# Read the next block.
self.fileobj.seek(self.offset)
tarinfo = None
while True:
try:
tarinfo = self.tarinfo.fromtarfile(self)
except EOFHeaderError as e:
if self.ignore_zeros:
self._dbg(2, "0x%X: %s" % (self.offset, e))
self.offset += BLOCKSIZE
continue
except InvalidHeaderError as e:
if self.ignore_zeros:
self._dbg(2, "0x%X: %s" % (self.offset, e))
self.offset += BLOCKSIZE
continue
elif self.offset == 0:
raise ReadError(str(e))
except EmptyHeaderError:
if self.offset == 0:
raise ReadError("empty file")
except TruncatedHeaderError as e:
if self.offset == 0:
raise ReadError(str(e))
except SubsequentHeaderError as e:
raise ReadError(str(e))
break
if tarinfo is not None:
self.members.append(tarinfo)
else:
self._loaded = True
return tarinfo
#--------------------------------------------------------------------------
# Little helper methods:
def _getmember(self, name, tarinfo=None, normalize=False):
"""Find an archive member by name from bottom to top.
If tarinfo is given, it is used as the starting point.
"""
# Ensure that all members have been loaded.
members = self.getmembers()
# Limit the member search list up to tarinfo.
if tarinfo is not None:
members = members[:members.index(tarinfo)]
if normalize:
name = os.path.normpath(name)
for member in reversed(members):
if normalize:
member_name = os.path.normpath(member.name)
else:
member_name = member.name
if name == member_name:
return member
def _load(self):
"""Read through the entire archive file and look for readable
members.
"""
while True:
tarinfo = self.next()
if tarinfo is None:
break
self._loaded = True
def _check(self, mode=None):
"""Check if TarFile is still open, and if the operation's mode
corresponds to TarFile's mode.
"""
if self.closed:
raise IOError("%s is closed" % self.__class__.__name__)
if mode is not None and self.mode not in mode:
raise IOError("bad operation for mode %r" % self.mode)
def _find_link_target(self, tarinfo):
"""Find the target member of a symlink or hardlink member in the
archive.
"""
if tarinfo.issym():
# Always search the entire archive.
linkname = os.path.dirname(tarinfo.name) + "/" + tarinfo.linkname
limit = None
else:
# Search the archive before the link, because a hard link is
# just a reference to an already archived file.
linkname = tarinfo.linkname
limit = tarinfo
member = self._getmember(linkname, tarinfo=limit, normalize=True)
if member is None:
raise KeyError("linkname %r not found" % linkname)
return member
def __iter__(self):
"""Provide an iterator object.
"""
if self._loaded:
return iter(self.members)
else:
return TarIter(self)
def _dbg(self, level, msg):
"""Write debugging output to sys.stderr.
"""
if level <= self.debug:
print(msg, file=sys.stderr)
def __enter__(self):
self._check()
return self
def __exit__(self, type, value, traceback):
if type is None:
self.close()
else:
# An exception occurred. We must not call close() because
# it would try to write end-of-archive blocks and padding.
if not self._extfileobj:
self.fileobj.close()
self.closed = True
# class TarFile
class TarIter(object):
"""Iterator Class.
for tarinfo in TarFile(...):
suite...
"""
def __init__(self, tarfile):
"""Construct a TarIter object.
"""
self.tarfile = tarfile
self.index = 0
def __iter__(self):
"""Return iterator object.
"""
return self
def __next__(self):
"""Return the next item using TarFile's next() method.
When all members have been read, set TarFile as _loaded.
"""
# Fix for SF #1100429: Under rare circumstances it can
# happen that getmembers() is called during iteration,
# which will cause TarIter to stop prematurely.
if not self.tarfile._loaded:
tarinfo = self.tarfile.next()
if not tarinfo:
self.tarfile._loaded = True
raise StopIteration
else:
try:
tarinfo = self.tarfile.members[self.index]
except IndexError:
raise StopIteration
self.index += 1
return tarinfo
next = __next__ # for Python 2.x
#--------------------
# exported functions
#--------------------
def is_tarfile(name):
"""Return True if name points to a tar archive that we
are able to handle, else return False.
"""
try:
t = open(name)
t.close()
return True
except TarError:
return False
bltn_open = open
open = TarFile.open
| mit |
christophelec/github3.py | tests/integration/test_repos_commit.py | 10 | 1282 | """Integration tests for Repository Commit objects."""
import github3
from . import helper
class TestRepoCommit(helper.IntegrationHelper):
"""Integration tests for the RepoCommit object."""
def test_statuses(self):
"""Test the ability to retrieve statuses on a commit."""
cassette_name = self.cassette_name('statuses')
with self.recorder.use_cassette(cassette_name):
repository = self.gh.repository('sigmavirus24', 'github3.py')
commit = repository.commit(
'29eaea046b353723f80a4810e3f2ea9d16ea6c25'
)
statuses = list(commit.statuses())
for status in statuses:
assert isinstance(status, github3.repos.status.Status)
def test_comments(self):
"""Test the ability to retrieve comments on a commit."""
cassette_name = self.cassette_name('comments')
with self.recorder.use_cassette(cassette_name):
repository = self.gh.repository('octocat', 'Hello-World')
commit = repository.commit(
'553c2077f0edc3d5dc5d17262f6aa498e69d6f8e'
)
comments = list(commit.comments())
for comment in comments:
assert isinstance(comment, github3.repos.comment.RepoComment)
| bsd-3-clause |
Antiun/c2c-rd-addons | purchase_landed_costs/__openerp__.py | 4 | 2193 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2012 ChriCar Beteiligungs- und Beratungs- GmbH (<http://www.camptocamp.at>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{ 'sequence': 500,
'name': 'Landed Costs',
'version': '0.8',
'category': 'Warehouse Management',
'description': """
    This module adds the possibility to include landed costs in the average price computation.
    Landed costs can be defined for
    * purchase orders
    * purchase order lines (disabled in v7 due to o2m restrictions)
    Costs defined for purchase orders and pickings are distributed according to the distribution type
    defined in the landed cost category:
    * value - for example, customs fees
    * quantity - for example, freight
    For each landed cost position a draft invoice is created on validation of the purchase order.
    The products used to define landed costs must have their "Distribution Type" set to
    * "Value" (for customs) or
    * "Quantity" (for freight)
""",
'author': 'ChriCar Beteiligungs- und Beratungs- GmbH',
'depends': ['purchase' ],
'data': ['security/ir.model.access.csv',
'purchase_view.xml',
'stock_view.xml',
],
'demo_xml': [],
'installable': False,
'active': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
JamieFBousfield/heekscnc | nc/anilam_crusader_m.py | 34 | 3760 | # Preliminary postprocessor support for Anilam Crusader M CNC controller
# This code was modified from iso.py and emc2.py distributed with HeeksCAD as of Sep 2010
# Kurt Jensen 6 Sep 2010
# Use at your own risk.
import nc
import iso
class Creator(iso.Creator):
def init(self):
iso.Creator.init(self)
self.arc_centre_absolute = True
def SPACE(self): return(' ')
# This version of COMMENT removes comments from the resultant GCode
# Note: The Anilam hates comments when importing code.
def COMMENT(self,comment): return('')
def program_begin(self, id, comment):
self.write('%\n'); # Start of file token that Anilam Crusader M likes
# No Comments for the Anilam crusaher M, please......
#self.write( ('(' + comment + ')' + '\n') )
def program_end(self):
self.write_blocknum()
self.write('G29E\n') # End of code signal for Anilam Crusader M
self.write('%\n') # EOF signal for Anilam Crusader M
############################################################################
## Settings
def imperial(self):
self.write_blocknum()
self.write( self.IMPERIAL() + '\n')
self.fmt.number_of_decimal_places = 4
def metric(self):
self.write_blocknum()
self.write( self.METRIC() + '\n' )
self.fmt.number_of_decimal_places = 3
def absolute(self):
self.write_blocknum()
self.write( self.ABSOLUTE() + '\n')
def incremental(self):
self.write_blocknum()
self.write( self.INCREMENTAL() + '\n' )
def polar(self, on=True):
if (on) :
self.write_blocknum()
self.write(self.POLAR_ON() + '\n' )
else :
self.write_blocknum()
self.write(self.POLAR_OFF() + '\n' )
def set_plane(self, plane):
if (plane == 0) :
self.write_blocknum()
self.write('G17\n')
elif (plane == 1) :
self.write_blocknum()
self.write('G18\n')
elif (plane == 2) :
self.write_blocknum()
self.write('G19\n')
def comment(self, text):
self.write_blocknum()
############################################################################
## Tools
def tool_change(self, id):
self.write_blocknum()
self.write(('T%i' % id) + '\n')
self.t = id
    def tool_defn(self, id, name='', params=None):
        self.write_blocknum()
        self.write(('T10%.2d' % id) + ' ')
        # The original body referenced undefined 'radius' and 'length' names;
        # derive them from the optional params dict instead (the 'diameter' and
        # 'cutting edge height' key names are assumptions about the caller's data).
        params = params or {}
        diameter = params.get('diameter')
        radius = diameter / 2.0 if diameter is not None else None
        length = params.get('cutting edge height')
        if (radius != None):
            self.write(('X%.3f' % radius) + ' ')
        if (length != None):
            self.write('Z%.3f' % length)
        self.write('\n')
# This is the coordinate system we're using. G54->G59, G59.1, G59.2, G59.3
# These are selected by values from 1 to 9 inclusive.
def workplane(self, id):
if ((id >= 1) and (id <= 6)):
self.write_blocknum()
self.write( (self.WORKPLANE() % (id + self.WORKPLANE_BASE())) + '\n')
if ((id >= 7) and (id <= 9)):
self.write_blocknum()
self.write( ((self.WORKPLANE() % (6 + self.WORKPLANE_BASE())) + ('.%i' % (id - 6))) + '\n')
# inhibit N codes being generated for line numbers:
def write_blocknum(self):
pass
def drill(self, x=None, y=None, dwell=None, depthparams = None, retract_mode=None, spindle_mode=None, internal_coolant_on=None, rapid_to_clearance = None):
self.write('(Canned drill cycle ops are not yet supported here on this Anilam Crusader M postprocessor)')
nc.creator = Creator()
| bsd-3-clause |
matthewtownson/soapy | soapy/pyqtgraph/graphicsItems/GradientEditorItem.py | 9 | 37015 | import weakref
import numpy as np
from ..Qt import QtGui, QtCore
from ..python2_3 import sortList
from .. import functions as fn
from .GraphicsObject import GraphicsObject
from .GraphicsWidget import GraphicsWidget
from ..widgets.SpinBox import SpinBox
from ..pgcollections import OrderedDict
from ..colormap import ColorMap
from ..python2_3 import cmp
__all__ = ['TickSliderItem', 'GradientEditorItem']
Gradients = OrderedDict([
('thermal', {'ticks': [(0.3333, (185, 0, 0, 255)), (0.6666, (255, 220, 0, 255)), (1, (255, 255, 255, 255)), (0, (0, 0, 0, 255))], 'mode': 'rgb'}),
('flame', {'ticks': [(0.2, (7, 0, 220, 255)), (0.5, (236, 0, 134, 255)), (0.8, (246, 246, 0, 255)), (1.0, (255, 255, 255, 255)), (0.0, (0, 0, 0, 255))], 'mode': 'rgb'}),
('yellowy', {'ticks': [(0.0, (0, 0, 0, 255)), (0.2328863796753704, (32, 0, 129, 255)), (0.8362738179251941, (255, 255, 0, 255)), (0.5257586450247, (115, 15, 255, 255)), (1.0, (255, 255, 255, 255))], 'mode': 'rgb'} ),
('bipolar', {'ticks': [(0.0, (0, 255, 255, 255)), (1.0, (255, 255, 0, 255)), (0.5, (0, 0, 0, 255)), (0.25, (0, 0, 255, 255)), (0.75, (255, 0, 0, 255))], 'mode': 'rgb'}),
('spectrum', {'ticks': [(1.0, (255, 0, 255, 255)), (0.0, (255, 0, 0, 255))], 'mode': 'hsv'}),
('cyclic', {'ticks': [(0.0, (255, 0, 4, 255)), (1.0, (255, 0, 0, 255))], 'mode': 'hsv'}),
('greyclip', {'ticks': [(0.0, (0, 0, 0, 255)), (0.99, (255, 255, 255, 255)), (1.0, (255, 0, 0, 255))], 'mode': 'rgb'}),
('grey', {'ticks': [(0.0, (0, 0, 0, 255)), (1.0, (255, 255, 255, 255))], 'mode': 'rgb'}),
])
def addGradientListToDocstring():
"""Decorator to add list of current pre-defined gradients to the end of a function docstring."""
def dec(fn):
fn.__doc__ = fn.__doc__ + str(Gradients.keys()).strip('[').strip(']')
return fn
return dec
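# Illustrative usage sketch (assumed application of the decorator): it is meant
# to be applied with parentheses, so the current preset names get appended to
# the decorated function's docstring, e.g.
#
#     @addGradientListToDocstring()
#     def loadPreset(self, name):
#         """Load a predefined gradient. Currently defined gradients are: """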
class TickSliderItem(GraphicsWidget):
## public class
"""**Bases:** :class:`GraphicsWidget <pyqtgraph.GraphicsWidget>`
A rectangular item with tick marks along its length that can (optionally) be moved by the user."""
def __init__(self, orientation='bottom', allowAdd=True, **kargs):
"""
============== =================================================================================
**Arguments:**
orientation Set the orientation of the gradient. Options are: 'left', 'right'
'top', and 'bottom'.
allowAdd Specifies whether ticks can be added to the item by the user.
tickPen Default is white. Specifies the color of the outline of the ticks.
Can be any of the valid arguments for :func:`mkPen <pyqtgraph.mkPen>`
============== =================================================================================
"""
## public
GraphicsWidget.__init__(self)
self.orientation = orientation
self.length = 100
self.tickSize = 15
self.ticks = {}
self.maxDim = 20
self.allowAdd = allowAdd
if 'tickPen' in kargs:
self.tickPen = fn.mkPen(kargs['tickPen'])
else:
self.tickPen = fn.mkPen('w')
self.orientations = {
'left': (90, 1, 1),
'right': (90, 1, 1),
'top': (0, 1, -1),
'bottom': (0, 1, 1)
}
self.setOrientation(orientation)
#self.setFrameStyle(QtGui.QFrame.NoFrame | QtGui.QFrame.Plain)
#self.setBackgroundRole(QtGui.QPalette.NoRole)
#self.setMouseTracking(True)
#def boundingRect(self):
#return self.mapRectFromParent(self.geometry()).normalized()
#def shape(self): ## No idea why this is necessary, but rotated items do not receive clicks otherwise.
#p = QtGui.QPainterPath()
#p.addRect(self.boundingRect())
#return p
def paint(self, p, opt, widget):
#p.setPen(fn.mkPen('g', width=3))
#p.drawRect(self.boundingRect())
return
def keyPressEvent(self, ev):
ev.ignore()
def setMaxDim(self, mx=None):
if mx is None:
mx = self.maxDim
else:
self.maxDim = mx
if self.orientation in ['bottom', 'top']:
self.setFixedHeight(mx)
self.setMaximumWidth(16777215)
else:
self.setFixedWidth(mx)
self.setMaximumHeight(16777215)
def setOrientation(self, orientation):
## public
"""Set the orientation of the TickSliderItem.
============== ===================================================================
**Arguments:**
orientation Options are: 'left', 'right', 'top', 'bottom'
The orientation option specifies which side of the slider the
ticks are on, as well as whether the slider is vertical ('right'
and 'left') or horizontal ('top' and 'bottom').
============== ===================================================================
"""
self.orientation = orientation
self.setMaxDim()
self.resetTransform()
ort = orientation
if ort == 'top':
transform = QtGui.QTransform.fromScale(1, -1)
transform.translate(0, -self.height())
self.setTransform(transform)
elif ort == 'left':
transform = QtGui.QTransform()
transform.rotate(270)
transform.scale(1, -1)
transform.translate(-self.height(), -self.maxDim)
self.setTransform(transform)
elif ort == 'right':
transform = QtGui.QTransform()
transform.rotate(270)
transform.translate(-self.height(), 0)
self.setTransform(transform)
elif ort != 'bottom':
raise Exception("%s is not a valid orientation. Options are 'left', 'right', 'top', and 'bottom'" %str(ort))
self.translate(self.tickSize/2., 0)
def addTick(self, x, color=None, movable=True):
## public
"""
Add a tick to the item.
============== ==================================================================
**Arguments:**
x Position where tick should be added.
color Color of added tick. If color is not specified, the color will be
white.
movable Specifies whether the tick is movable with the mouse.
============== ==================================================================
"""
if color is None:
color = QtGui.QColor(255,255,255)
tick = Tick(self, [x*self.length, 0], color, movable, self.tickSize, pen=self.tickPen)
self.ticks[tick] = x
tick.setParentItem(self)
return tick
def removeTick(self, tick):
## public
"""
Removes the specified tick.
"""
del self.ticks[tick]
tick.setParentItem(None)
if self.scene() is not None:
self.scene().removeItem(tick)
def tickMoved(self, tick, pos):
#print "tick changed"
## Correct position of tick if it has left bounds.
newX = min(max(0, pos.x()), self.length)
pos.setX(newX)
tick.setPos(pos)
self.ticks[tick] = float(newX) / self.length
def tickMoveFinished(self, tick):
pass
def tickClicked(self, tick, ev):
if ev.button() == QtCore.Qt.RightButton:
self.removeTick(tick)
def widgetLength(self):
if self.orientation in ['bottom', 'top']:
return self.width()
else:
return self.height()
def resizeEvent(self, ev):
wlen = max(40, self.widgetLength())
self.setLength(wlen-self.tickSize-2)
self.setOrientation(self.orientation)
#bounds = self.scene().itemsBoundingRect()
#bounds.setLeft(min(-self.tickSize*0.5, bounds.left()))
#bounds.setRight(max(self.length + self.tickSize, bounds.right()))
#self.setSceneRect(bounds)
#self.fitInView(bounds, QtCore.Qt.KeepAspectRatio)
def setLength(self, newLen):
#private
for t, x in list(self.ticks.items()):
t.setPos(x * newLen + 1, t.pos().y())
self.length = float(newLen)
#def mousePressEvent(self, ev):
#QtGui.QGraphicsView.mousePressEvent(self, ev)
#self.ignoreRelease = False
#for i in self.items(ev.pos()):
#if isinstance(i, Tick):
#self.ignoreRelease = True
#break
##if len(self.items(ev.pos())) > 0: ## Let items handle their own clicks
##self.ignoreRelease = True
#def mouseReleaseEvent(self, ev):
#QtGui.QGraphicsView.mouseReleaseEvent(self, ev)
#if self.ignoreRelease:
#return
#pos = self.mapToScene(ev.pos())
#if ev.button() == QtCore.Qt.LeftButton and self.allowAdd:
#if pos.x() < 0 or pos.x() > self.length:
#return
#if pos.y() < 0 or pos.y() > self.tickSize:
#return
#pos.setX(min(max(pos.x(), 0), self.length))
#self.addTick(pos.x()/self.length)
#elif ev.button() == QtCore.Qt.RightButton:
#self.showMenu(ev)
def mouseClickEvent(self, ev):
if ev.button() == QtCore.Qt.LeftButton and self.allowAdd:
pos = ev.pos()
if pos.x() < 0 or pos.x() > self.length:
return
if pos.y() < 0 or pos.y() > self.tickSize:
return
pos.setX(min(max(pos.x(), 0), self.length))
self.addTick(pos.x()/self.length)
elif ev.button() == QtCore.Qt.RightButton:
self.showMenu(ev)
#if ev.button() == QtCore.Qt.RightButton:
#if self.moving:
#ev.accept()
#self.setPos(self.startPosition)
#self.moving = False
#self.sigMoving.emit(self)
#self.sigMoved.emit(self)
#else:
#pass
#self.view().tickClicked(self, ev)
###remove
def hoverEvent(self, ev):
if (not ev.isExit()) and ev.acceptClicks(QtCore.Qt.LeftButton):
ev.acceptClicks(QtCore.Qt.RightButton)
## show ghost tick
#self.currentPen = fn.mkPen(255, 0,0)
#else:
#self.currentPen = self.pen
#self.update()
def showMenu(self, ev):
pass
def setTickColor(self, tick, color):
"""Set the color of the specified tick.
============== ==================================================================
**Arguments:**
tick Can be either an integer corresponding to the index of the tick
or a Tick object. Ex: if you had a slider with 3 ticks and you
wanted to change the middle tick, the index would be 1.
color The color to make the tick. Can be any argument that is valid for
:func:`mkBrush <pyqtgraph.mkBrush>`
============== ==================================================================
"""
tick = self.getTick(tick)
tick.color = color
tick.update()
#tick.setBrush(QtGui.QBrush(QtGui.QColor(tick.color)))
def setTickValue(self, tick, val):
## public
"""
Set the position (along the slider) of the tick.
============== ==================================================================
**Arguments:**
tick Can be either an integer corresponding to the index of the tick
or a Tick object. Ex: if you had a slider with 3 ticks and you
wanted to change the middle tick, the index would be 1.
val The desired position of the tick. If val is < 0, position will be
set to 0. If val is > 1, position will be set to 1.
============== ==================================================================
"""
tick = self.getTick(tick)
val = min(max(0.0, val), 1.0)
x = val * self.length
pos = tick.pos()
pos.setX(x)
tick.setPos(pos)
self.ticks[tick] = val
self.updateGradient()
def tickValue(self, tick):
## public
"""Return the value (from 0.0 to 1.0) of the specified tick.
============== ==================================================================
**Arguments:**
tick Can be either an integer corresponding to the index of the tick
or a Tick object. Ex: if you had a slider with 3 ticks and you
wanted the value of the middle tick, the index would be 1.
============== ==================================================================
"""
tick = self.getTick(tick)
return self.ticks[tick]
def getTick(self, tick):
## public
"""Return the Tick object at the specified index.
============== ==================================================================
**Arguments:**
tick An integer corresponding to the index of the desired tick. If the
argument is not an integer it will be returned unchanged.
============== ==================================================================
"""
if type(tick) is int:
tick = self.listTicks()[tick][0]
return tick
#def mouseMoveEvent(self, ev):
#QtGui.QGraphicsView.mouseMoveEvent(self, ev)
def listTicks(self):
"""Return a sorted list of all the Tick objects on the slider."""
## public
ticks = list(self.ticks.items())
sortList(ticks, lambda a,b: cmp(a[1], b[1])) ## see pyqtgraph.python2_3.sortList
return ticks
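# Minimal usage sketch (illustrative only; assumes a running Qt application and a
# GraphicsView/GraphicsLayout hosting the item; variable names are hypothetical):
#
#   slider = TickSliderItem(orientation='bottom', allowAdd=True)
#   t = slider.addTick(0.25)                          # white tick at 25% of the length
#   slider.setTickColor(t, QtGui.QColor(255, 0, 0))
#   print(slider.tickValue(t))                        # -> 0.25
#   slider.removeTick(t)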
class GradientEditorItem(TickSliderItem):
"""
**Bases:** :class:`TickSliderItem <pyqtgraph.TickSliderItem>`
An item that can be used to define a color gradient. Implements common pre-defined gradients that are
customizable by the user. :class:`GradientWidget <pyqtgraph.GradientWidget>` provides a widget
with a GradientEditorItem that can be added to a GUI.
================================ ===========================================================
**Signals:**
sigGradientChanged(self) Signal is emitted anytime the gradient changes. The signal
is emitted in real time while ticks are being dragged or
colors are being changed.
sigGradientChangeFinished(self) Signal is emitted when the gradient is finished changing.
================================ ===========================================================
"""
sigGradientChanged = QtCore.Signal(object)
sigGradientChangeFinished = QtCore.Signal(object)
def __init__(self, *args, **kargs):
"""
Create a new GradientEditorItem.
All arguments are passed to :func:`TickSliderItem.__init__ <pyqtgraph.TickSliderItem.__init__>`
=============== =================================================================================
**Arguments:**
orientation Set the orientation of the gradient. Options are: 'left', 'right',
'top', and 'bottom'.
allowAdd Default is True. Specifies whether ticks can be added to the item.
tickPen Default is white. Specifies the color of the outline of the ticks.
Can be any of the valid arguments for :func:`mkPen <pyqtgraph.mkPen>`
=============== =================================================================================
"""
self.currentTick = None
self.currentTickColor = None
self.rectSize = 15
self.gradRect = QtGui.QGraphicsRectItem(QtCore.QRectF(0, self.rectSize, 100, self.rectSize))
self.backgroundRect = QtGui.QGraphicsRectItem(QtCore.QRectF(0, -self.rectSize, 100, self.rectSize))
self.backgroundRect.setBrush(QtGui.QBrush(QtCore.Qt.DiagCrossPattern))
self.colorMode = 'rgb'
TickSliderItem.__init__(self, *args, **kargs)
self.colorDialog = QtGui.QColorDialog()
self.colorDialog.setOption(QtGui.QColorDialog.ShowAlphaChannel, True)
self.colorDialog.setOption(QtGui.QColorDialog.DontUseNativeDialog, True)
self.colorDialog.currentColorChanged.connect(self.currentColorChanged)
self.colorDialog.rejected.connect(self.currentColorRejected)
self.colorDialog.accepted.connect(self.currentColorAccepted)
self.backgroundRect.setParentItem(self)
self.gradRect.setParentItem(self)
self.setMaxDim(self.rectSize + self.tickSize)
self.rgbAction = QtGui.QAction('RGB', self)
self.rgbAction.setCheckable(True)
self.rgbAction.triggered.connect(lambda: self.setColorMode('rgb'))
self.hsvAction = QtGui.QAction('HSV', self)
self.hsvAction.setCheckable(True)
self.hsvAction.triggered.connect(lambda: self.setColorMode('hsv'))
self.menu = QtGui.QMenu()
## build context menu of gradients
l = self.length
self.length = 100
global Gradients
for g in Gradients:
px = QtGui.QPixmap(100, 15)
p = QtGui.QPainter(px)
self.restoreState(Gradients[g])
grad = self.getGradient()
brush = QtGui.QBrush(grad)
p.fillRect(QtCore.QRect(0, 0, 100, 15), brush)
p.end()
label = QtGui.QLabel()
label.setPixmap(px)
label.setContentsMargins(1, 1, 1, 1)
act = QtGui.QWidgetAction(self)
act.setDefaultWidget(label)
act.triggered.connect(self.contextMenuClicked)
act.name = g
self.menu.addAction(act)
self.length = l
self.menu.addSeparator()
self.menu.addAction(self.rgbAction)
self.menu.addAction(self.hsvAction)
for t in list(self.ticks.keys()):
self.removeTick(t)
self.addTick(0, QtGui.QColor(0,0,0), True)
self.addTick(1, QtGui.QColor(255,0,0), True)
self.setColorMode('rgb')
self.updateGradient()
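# Net effect of the constructor above: a context menu holding a pixmap preview of
# every entry in Gradients plus RGB/HSV mode actions, and a default gradient with
# two movable ticks (black at 0.0, red at 1.0) in 'rgb' mode.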
def setOrientation(self, orientation):
## public
"""
Set the orientation of the GradientEditorItem.
============== ===================================================================
**Arguments:**
orientation Options are: 'left', 'right', 'top', 'bottom'
The orientation option specifies which side of the gradient the
ticks are on, as well as whether the gradient is vertical ('right'
and 'left') or horizontal ('top' and 'bottom').
============== ===================================================================
"""
TickSliderItem.setOrientation(self, orientation)
self.translate(0, self.rectSize)
def showMenu(self, ev):
#private
self.menu.popup(ev.screenPos().toQPoint())
def contextMenuClicked(self, b=None):
#private
#global Gradients
act = self.sender()
self.loadPreset(act.name)
@addGradientListToDocstring()
def loadPreset(self, name):
"""
Load a predefined gradient. Currently defined gradients are:
"""## TODO: provide image with names of defined gradients
#global Gradients
self.restoreState(Gradients[name])
def setColorMode(self, cm):
"""
Set the color mode for the gradient. Options are: 'hsv', 'rgb'
"""
## public
if cm not in ['rgb', 'hsv']:
raise Exception("Unknown color mode %s. Options are 'rgb' and 'hsv'." % str(cm))
try:
self.rgbAction.blockSignals(True)
self.hsvAction.blockSignals(True)
self.rgbAction.setChecked(cm == 'rgb')
self.hsvAction.setChecked(cm == 'hsv')
finally:
self.rgbAction.blockSignals(False)
self.hsvAction.blockSignals(False)
self.colorMode = cm
self.updateGradient()
def colorMap(self):
"""Return a ColorMap object representing the current state of the editor."""
if self.colorMode == 'hsv':
raise NotImplementedError('hsv colormaps not yet supported')
pos = []
color = []
for t,x in self.listTicks():
pos.append(x)
c = t.color
color.append([c.red(), c.green(), c.blue(), c.alpha()])
return ColorMap(np.array(pos), np.array(color, dtype=np.ubyte))
def updateGradient(self):
#private
self.gradient = self.getGradient()
self.gradRect.setBrush(QtGui.QBrush(self.gradient))
self.sigGradientChanged.emit(self)
def setLength(self, newLen):
#private (but maybe public)
TickSliderItem.setLength(self, newLen)
self.backgroundRect.setRect(1, -self.rectSize, newLen, self.rectSize)
self.gradRect.setRect(1, -self.rectSize, newLen, self.rectSize)
self.updateGradient()
def currentColorChanged(self, color):
#private
if color.isValid() and self.currentTick is not None:
self.setTickColor(self.currentTick, color)
self.updateGradient()
def currentColorRejected(self):
#private
self.setTickColor(self.currentTick, self.currentTickColor)
self.updateGradient()
def currentColorAccepted(self):
self.sigGradientChangeFinished.emit(self)
def tickClicked(self, tick, ev):
#private
if ev.button() == QtCore.Qt.LeftButton:
self.raiseColorDialog(tick)
elif ev.button() == QtCore.Qt.RightButton:
self.raiseTickContextMenu(tick, ev)
def raiseColorDialog(self, tick):
if not tick.colorChangeAllowed:
return
self.currentTick = tick
self.currentTickColor = tick.color
self.colorDialog.setCurrentColor(tick.color)
self.colorDialog.open()
def raiseTickContextMenu(self, tick, ev):
self.tickMenu = TickMenu(tick, self)
self.tickMenu.popup(ev.screenPos().toQPoint())
def tickMoved(self, tick, pos):
#private
TickSliderItem.tickMoved(self, tick, pos)
self.updateGradient()
def tickMoveFinished(self, tick):
self.sigGradientChangeFinished.emit(self)
def getGradient(self):
"""Return a QLinearGradient object."""
g = QtGui.QLinearGradient(QtCore.QPointF(0,0), QtCore.QPointF(self.length,0))
if self.colorMode == 'rgb':
ticks = self.listTicks()
g.setStops([(x, QtGui.QColor(t.color)) for t,x in ticks])
elif self.colorMode == 'hsv': ## HSV mode is approximated for display by interpolating 10 points between each stop
ticks = self.listTicks()
stops = []
stops.append((ticks[0][1], ticks[0][0].color))
for i in range(1,len(ticks)):
x1 = ticks[i-1][1]
x2 = ticks[i][1]
dx = (x2-x1) / 10.
for j in range(1,10):
x = x1 + dx*j
stops.append((x, self.getColor(x)))
stops.append((x2, self.getColor(x2)))
g.setStops(stops)
return g
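# Note: QLinearGradient interpolates its stops in RGB, so the 'hsv' branch above
# approximates HSV blending by inserting intermediate stops (computed via
# getColor) between each pair of user ticks.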
def getColor(self, x, toQColor=True):
"""
Return a color for a given value.
============== ==================================================================
**Arguments:**
x Value (position on gradient) of requested color.
toQColor If true, returns a QColor object, else returns a (r,g,b,a) tuple.
============== ==================================================================
"""
ticks = self.listTicks()
if x <= ticks[0][1]:
c = ticks[0][0].color
if toQColor:
return QtGui.QColor(c) # always copy colors before handing them out
else:
return (c.red(), c.green(), c.blue(), c.alpha())
if x >= ticks[-1][1]:
c = ticks[-1][0].color
if toQColor:
return QtGui.QColor(c) # always copy colors before handing them out
else:
return (c.red(), c.green(), c.blue(), c.alpha())
x2 = ticks[0][1]
for i in range(1,len(ticks)):
x1 = x2
x2 = ticks[i][1]
if x1 <= x and x2 >= x:
break
dx = (x2-x1)
if dx == 0:
f = 0.
else:
f = (x-x1) / dx
c1 = ticks[i-1][0].color
c2 = ticks[i][0].color
if self.colorMode == 'rgb':
r = c1.red() * (1.-f) + c2.red() * f
g = c1.green() * (1.-f) + c2.green() * f
b = c1.blue() * (1.-f) + c2.blue() * f
a = c1.alpha() * (1.-f) + c2.alpha() * f
if toQColor:
return QtGui.QColor(int(r), int(g), int(b), int(a))
else:
return (r,g,b,a)
elif self.colorMode == 'hsv':
h1,s1,v1,_ = c1.getHsv()
h2,s2,v2,_ = c2.getHsv()
h = h1 * (1.-f) + h2 * f
s = s1 * (1.-f) + s2 * f
v = v1 * (1.-f) + v2 * f
c = QtGui.QColor()
c.setHsv(h,s,v)
if toQColor:
return c
else:
return (c.red(), c.green(), c.blue(), c.alpha())
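# The branch above is plain linear interpolation between the two bracketing ticks:
# with f = (x - x1) / (x2 - x1), each channel is c = c1*(1-f) + c2*f. For example,
# halfway (f=0.5) between a black tick (0, 0, 0, 255) and a white tick
# (255, 255, 255, 255) this yields (127, 127, 127, 255) in 'rgb' mode after the
# int() truncation.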
def getLookupTable(self, nPts, alpha=None):
"""
Return an RGB(A) lookup table (ndarray).
============== ============================================================================
**Arguments:**
nPts The number of points in the returned lookup table.
alpha True, False, or None - Specifies whether or not alpha values are included
in the table. If alpha is None, alpha will be automatically determined.
============== ============================================================================
"""
if alpha is None:
alpha = self.usesAlpha()
if alpha:
table = np.empty((nPts,4), dtype=np.ubyte)
else:
table = np.empty((nPts,3), dtype=np.ubyte)
for i in range(nPts):
x = float(i)/(nPts-1)
color = self.getColor(x, toQColor=False)
table[i] = color[:table.shape[1]]
return table
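# Illustrative sketch (not part of the module): a table produced here is typically
# fed to an image item for display, e.g.
#
#   lut = gradient_editor.getLookupTable(256)   # (256, 3) or (256, 4) uint8 array
#   image_item.setLookupTable(lut)              # 'gradient_editor'/'image_item' are hypothetical
#
# where row i holds the gradient color at position i/(nPts-1).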
def usesAlpha(self):
"""Return True if any ticks have an alpha < 255"""
ticks = self.listTicks()
for t in ticks:
if t[0].color.alpha() < 255:
return True
return False
def isLookupTrivial(self):
"""Return True if the gradient has exactly two stops in it: black at 0.0 and white at 1.0"""
ticks = self.listTicks()
if len(ticks) != 2:
return False
if ticks[0][1] != 0.0 or ticks[1][1] != 1.0:
return False
c1 = fn.colorTuple(ticks[0][0].color)
c2 = fn.colorTuple(ticks[1][0].color)
if c1 != (0,0,0,255) or c2 != (255,255,255,255):
return False
return True
def mouseReleaseEvent(self, ev):
#private
TickSliderItem.mouseReleaseEvent(self, ev)
self.updateGradient()
def addTick(self, x, color=None, movable=True, finish=True):
"""
Add a tick to the gradient. Return the tick.
============== ==================================================================
**Arguments:**
x Position where tick should be added.
color Color of added tick. If color is not specified, the color will be
the color of the gradient at the specified position.
movable Specifies whether the tick is movable with the mouse.
============== ==================================================================
"""
if color is None:
color = self.getColor(x)
t = TickSliderItem.addTick(self, x, color=color, movable=movable)
t.colorChangeAllowed = True
t.removeAllowed = True
if finish:
self.sigGradientChangeFinished.emit(self)
return t
def removeTick(self, tick, finish=True):
TickSliderItem.removeTick(self, tick)
if finish:
self.updateGradient()
self.sigGradientChangeFinished.emit(self)
def saveState(self):
"""
Return a dictionary with parameters for rebuilding the gradient. Keys will include:
- 'mode': hsv or rgb
- 'ticks': a list of tuples (pos, (r,g,b,a))
"""
## public
ticks = []
for t in self.ticks:
c = t.color
ticks.append((self.ticks[t], (c.red(), c.green(), c.blue(), c.alpha())))
state = {'mode': self.colorMode, 'ticks': ticks}
return state
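# For reference, the module-level Gradients OrderedDict stores entries in exactly
# this format; e.g. the 'grey' preset is
#   {'mode': 'rgb', 'ticks': [(0.0, (0, 0, 0, 255)), (1.0, (255, 255, 255, 255))]}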
def restoreState(self, state):
"""
Restore the gradient specified in state.
============== ====================================================================
**Arguments:**
state A dictionary with same structure as those returned by
:func:`saveState <pyqtgraph.GradientEditorItem.saveState>`
Keys must include:
- 'mode': hsv or rgb
- 'ticks': a list of tuples (pos, (r,g,b,a))
============== ====================================================================
"""
## public
self.setColorMode(state['mode'])
for t in list(self.ticks.keys()):
self.removeTick(t, finish=False)
for t in state['ticks']:
c = QtGui.QColor(*t[1])
self.addTick(t[0], c, finish=False)
self.updateGradient()
self.sigGradientChangeFinished.emit(self)
def setColorMap(self, cm):
self.setColorMode('rgb')
for t in list(self.ticks.keys()):
self.removeTick(t, finish=False)
colors = cm.getColors(mode='qcolor')
for i in range(len(cm.pos)):
x = cm.pos[i]
c = colors[i]
self.addTick(x, c, finish=False)
self.updateGradient()
self.sigGradientChangeFinished.emit(self)
class Tick(QtGui.QGraphicsWidget): ## NOTE: Making this a subclass of GraphicsObject instead results in
## activating this bug: https://bugreports.qt-project.org/browse/PYSIDE-86
## private class
# Historical note: when Tick subclassed QtGui.QGraphicsObject (as it originally did),
# GraphicsScene.items(self, *args) would return the Tick object typed as
# QtGui.QMultimediaWidgets.QGraphicsVideoItem under python2.7-PyQt5(5.4.0)
sigMoving = QtCore.Signal(object)
sigMoved = QtCore.Signal(object)
def __init__(self, view, pos, color, movable=True, scale=10, pen='w'):
self.movable = movable
self.moving = False
self.view = weakref.ref(view)
self.scale = scale
self.color = color
self.pen = fn.mkPen(pen)
self.hoverPen = fn.mkPen(255,255,0)
self.currentPen = self.pen
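# Build the tick's triangular shape: apex at the local origin (0, 0), base of
# width 2*scale/sqrt(3) at y = scale (an equilateral triangle).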
self.pg = QtGui.QPainterPath(QtCore.QPointF(0,0))
self.pg.lineTo(QtCore.QPointF(-scale/3**0.5, scale))
self.pg.lineTo(QtCore.QPointF(scale/3**0.5, scale))
self.pg.closeSubpath()
QtGui.QGraphicsWidget.__init__(self)
self.setPos(pos[0], pos[1])
if self.movable:
self.setZValue(1)
else:
self.setZValue(0)
def boundingRect(self):
return self.pg.boundingRect()
def shape(self):
return self.pg
def paint(self, p, *args):
p.setRenderHints(QtGui.QPainter.Antialiasing)
p.fillPath(self.pg, fn.mkBrush(self.color))
p.setPen(self.currentPen)
p.drawPath(self.pg)
def mouseDragEvent(self, ev):
if self.movable and ev.button() == QtCore.Qt.LeftButton:
if ev.isStart():
self.moving = True
self.cursorOffset = self.pos() - self.mapToParent(ev.buttonDownPos())
self.startPosition = self.pos()
ev.accept()
if not self.moving:
return
newPos = self.cursorOffset + self.mapToParent(ev.pos())
newPos.setY(self.pos().y())
self.setPos(newPos)
self.view().tickMoved(self, newPos)
self.sigMoving.emit(self)
if ev.isFinish():
self.moving = False
self.sigMoved.emit(self)
self.view().tickMoveFinished(self)
def mouseClickEvent(self, ev):
if ev.button() == QtCore.Qt.RightButton and self.moving:
ev.accept()
self.setPos(self.startPosition)
self.view().tickMoved(self, self.startPosition)
self.moving = False
self.sigMoving.emit(self)
self.sigMoved.emit(self)
else:
self.view().tickClicked(self, ev)
##remove
def hoverEvent(self, ev):
if (not ev.isExit()) and ev.acceptDrags(QtCore.Qt.LeftButton):
ev.acceptClicks(QtCore.Qt.LeftButton)
ev.acceptClicks(QtCore.Qt.RightButton)
self.currentPen = self.hoverPen
else:
self.currentPen = self.pen
self.update()
class TickMenu(QtGui.QMenu):
def __init__(self, tick, sliderItem):
QtGui.QMenu.__init__(self)
self.tick = weakref.ref(tick)
self.sliderItem = weakref.ref(sliderItem)
self.removeAct = self.addAction("Remove Tick", lambda: self.sliderItem().removeTick(tick))
if (not self.tick().removeAllowed) or len(self.sliderItem().ticks) < 3:
self.removeAct.setEnabled(False)
positionMenu = self.addMenu("Set Position")
w = QtGui.QWidget()
l = QtGui.QGridLayout()
w.setLayout(l)
value = sliderItem.tickValue(tick)
self.fracPosSpin = SpinBox()
self.fracPosSpin.setOpts(value=value, bounds=(0.0, 1.0), step=0.01, decimals=2)
#self.dataPosSpin = SpinBox(value=dataVal)
#self.dataPosSpin.setOpts(decimals=3, siPrefix=True)
l.addWidget(QtGui.QLabel("Position:"), 0,0)
l.addWidget(self.fracPosSpin, 0, 1)
#l.addWidget(QtGui.QLabel("Position (data units):"), 1, 0)
#l.addWidget(self.dataPosSpin, 1,1)
#if self.sliderItem().dataParent is None:
# self.dataPosSpin.setEnabled(False)
a = QtGui.QWidgetAction(self)
a.setDefaultWidget(w)
positionMenu.addAction(a)
self.fracPosSpin.sigValueChanging.connect(self.fractionalValueChanged)
#self.dataPosSpin.valueChanged.connect(self.dataValueChanged)
colorAct = self.addAction("Set Color", lambda: self.sliderItem().raiseColorDialog(self.tick()))
if not self.tick().colorChangeAllowed:
colorAct.setEnabled(False)
def fractionalValueChanged(self, x):
self.sliderItem().setTickValue(self.tick(), self.fracPosSpin.value())
#if self.sliderItem().dataParent is not None:
# self.dataPosSpin.blockSignals(True)
# self.dataPosSpin.setValue(self.sliderItem().tickDataValue(self.tick()))
# self.dataPosSpin.blockSignals(False)
#def dataValueChanged(self, val):
# self.sliderItem().setTickValue(self.tick(), val, dataUnits=True)
# self.fracPosSpin.blockSignals(True)
# self.fracPosSpin.setValue(self.sliderItem().tickValue(self.tick()))
# self.fracPosSpin.blockSignals(False)
| gpl-3.0 |
EvanzzzZ/mxnet | python/mxnet/ndarray.py | 5 | 80441 | # coding: utf-8
# pylint: disable= too-many-lines, redefined-builtin, protected-access
# pylint: disable=import-error, no-name-in-module, undefined-variable
"""NDArray API of MXNet."""
from __future__ import absolute_import
from __future__ import division
try:
from __builtin__ import slice as py_slice
except ImportError:
from builtins import slice as py_slice
import ctypes
import warnings
import os as _os
import sys as _sys
import operator
import numpy as np
from .base import _LIB, string_types, numeric_types
from .base import c_array, py_str, c_str, mx_real_t, _Null # pylint: disable=unused-import
from .base import mx_uint, NDArrayHandle, check_call, OpHandle
from .base import ctypes2buffer
from .context import Context
from . import _ndarray_internal as _internal
from .ndarray_doc import _build_doc
# Use different version of NDArrayBase
# When possible, use cython to speedup part of computation.
# pylint: disable=unused-import
try:
if int(_os.environ.get("MXNET_ENABLE_CYTHON", True)) == 0:
from ._ctypes.ndarray import NDArrayBase, _set_ndarray_class
from ._ctypes.ndarray import CachedOp, _imperative_invoke
elif _sys.version_info >= (3, 0):
from ._cy3.ndarray import NDArrayBase, _set_ndarray_class, _imperative_invoke
from ._cy3.ndarray import CachedOp, _imperative_invoke
else:
from ._cy2.ndarray import NDArrayBase, _set_ndarray_class, _imperative_invoke
from ._cy2.ndarray import CachedOp, _imperative_invoke
except ImportError:
if int(_os.environ.get("MXNET_ENFORCE_CYTHON", False)) != 0:
raise ImportError("Cython Module cannot be loaded but MXNET_ENFORCE_CYTHON=1")
from ._ctypes.ndarray import NDArrayBase, _set_ndarray_class, _imperative_invoke
from ._ctypes.ndarray import CachedOp, _imperative_invoke
# pylint: enable=unused-import
# pylint: disable= no-member
_DTYPE_NP_TO_MX = {
np.float32 : 0,
np.float64 : 1,
np.float16 : 2,
np.uint8 : 3,
np.int32 : 4
}
_DTYPE_MX_TO_NP = {
0 : np.float32,
1 : np.float64,
2 : np.float16,
3 : np.uint8,
4 : np.int32
}
# pylint: enable= no-member
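# These two tables translate between numpy dtypes and MXNet's internal integer
# type codes; _new_alloc_handle uses the numpy->MXNet map when creating arrays,
# and the NDArray.dtype property uses the MXNet->numpy map when reporting types.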
def _new_empty_handle():
"""Returns a new empty handle.
Empty handle can be used to hold a result.
Returns
-------
handle
A new empty `NDArray` handle.
"""
hdl = NDArrayHandle()
check_call(_LIB.MXNDArrayCreateNone(ctypes.byref(hdl)))
return hdl
def _new_alloc_handle(shape, ctx, delay_alloc, dtype=mx_real_t):
"""Return a new handle with specified shape and context.
Empty handle is only used to hold results.
Returns
-------
handle
A new empty `NDArray` handle.
"""
hdl = NDArrayHandle()
check_call(_LIB.MXNDArrayCreateEx(
c_array(mx_uint, shape),
mx_uint(len(shape)),
ctypes.c_int(ctx.device_typeid),
ctypes.c_int(ctx.device_id),
ctypes.c_int(int(delay_alloc)),
ctypes.c_int(int(_DTYPE_NP_TO_MX[np.dtype(dtype).type])),
ctypes.byref(hdl)))
return hdl
def waitall():
"""Wait for all async operations to finish in MXNet.
This function is used for benchmarking only.
"""
check_call(_LIB.MXNDArrayWaitAll())
class NDArray(NDArrayBase):
"""An array object representing a multidimensional, homogeneous array of
fixed-size items.
"""
__slots__ = []
# pylint: disable= no-member, undefined-variable
def __repr__(self):
"""Returns a string representation of the array."""
shape_info = 'x'.join(['%d' % x for x in self.shape])
return '<%s %s @%s>' % (self.__class__.__name__,
shape_info, self.context)
def __add__(self, other):
"""x.__add__(y) <=> x+y <=> mx.nd.add(x, y) """
return add(self, other)
def __iadd__(self, other):
"""x.__iadd__(y) <=> x+=y """
if not self.writable:
raise ValueError('trying to add to a readonly NDArray')
if isinstance(other, NDArray):
return broadcast_add(self, other, out=self)
elif isinstance(other, numeric_types):
return _internal._plus_scalar(self, float(other), out=self)
else:
raise TypeError('type %s not supported' % str(type(other)))
def __radd__(self, other):
return self.__add__(other)
def __sub__(self, other):
"""x.__sub__(y) <=> x-y <=> mx.nd.subtract(x, y) """
return subtract(self, other)
def __isub__(self, other):
"""x.__isub__(y) <=> x-=y """
if not self.writable:
raise ValueError('trying to subtract from a readonly NDArray')
if isinstance(other, NDArray):
return broadcast_sub(self, other, out=self)
elif isinstance(other, numeric_types):
return _internal._minus_scalar(self, float(other), out=self)
else:
raise TypeError('type %s not supported' % str(type(other)))
def __rsub__(self, other):
"""x.__rsub__(y) <=> y-x <=> mx.nd.subtract(y, x) """
return subtract(other, self)
def __mul__(self, other):
"""x.__mul__(y) <=> x*y <=> mx.nd.multiply(x, y) """
return multiply(self, other)
def __neg__(self):
"""x.__neg__(y) <=> -x """
return _internal._mul_scalar(self, -1.0)
def __imul__(self, other):
"""x.__imul__(y) <=> x*=y """
if not self.writable:
raise ValueError('trying to multiply to a readonly NDArray')
if isinstance(other, NDArray):
return broadcast_mul(self, other, out=self)
elif isinstance(other, numeric_types):
return _internal._mul_scalar(self, float(other), out=self)
else:
raise TypeError('type %s not supported' % str(type(other)))
def __rmul__(self, other):
return self.__mul__(other)
def __div__(self, other):
"""x.__div__(y) <=> x/y <=> mx.nd.divide(x, y) """
return divide(self, other)
def __rdiv__(self, other):
"""x.__rdiv__(y) <=> y/x <=> mx.nd.divide(y, x) """
return divide(other, self)
def __idiv__(self, other):
"""x.__rdiv__(y) <=> x/=y """
if not self.writable:
raise ValueError('trying to divide from a readonly NDArray')
if isinstance(other, NDArray):
return broadcast_div(self, other, out=self)
elif isinstance(other, numeric_types):
return _internal._div_scalar(self, float(other), out=self)
else:
raise TypeError('type %s not supported' % str(type(other)))
def __truediv__(self, other):
return divide(self, other)
def __rtruediv__(self, other):
return divide(other, self)
def __itruediv__(self, other):
return self.__idiv__(other)
def __mod__(self, other):
"""x.__mod__(y) <=> x%y <=> mx.nd.modulo(x, y) """
return modulo(self, other)
def __rmod__(self, other):
"""x.__rmod__(y) <=> y%x <=> mx.nd.modulo(y, x) """
return modulo(other, self)
def __imod__(self, other):
"""x.__rmod__(y) <=> x%=y """
if not self.writable:
raise ValueError('trying to take modulo from a readonly NDArray')
if isinstance(other, NDArray):
return broadcast_mod(self, other, out=self)
elif isinstance(other, numeric_types):
return _internal._mod_scalar(self, float(other), out=self)
else:
raise TypeError('type %s not supported' % str(type(other)))
def __pow__(self, other):
"""x.__pow__(y) <=> x**y <=> mx.nd.power(x,y) """
return power(self, other)
def __rpow__(self, other):
"""x.__pow__(y) <=> y**x <=> mx.nd.power(y,x) """
return power(other, self)
def __eq__(self, other):
"""x.__eq__(y) <=> x==y <=> mx.nd.equal(x, y) """
return equal(self, other)
def __ne__(self, other):
"""x.__ne__(y) <=> x!=y <=> mx.nd.not_equal(x, y) """
return not_equal(self, other)
def __gt__(self, other):
"""x.__gt__(y) <=> x>y <=> mx.nd.greater(x, y) """
return greater(self, other)
def __ge__(self, other):
"""x.__ge__(y) <=> x>=y <=> mx.nd.greater_equal(x, y) """
return greater_equal(self, other)
def __lt__(self, other):
"""x.__lt__(y) <=> x<y <=> mx.nd.lesser(x, y) """
return lesser(self, other)
def __le__(self, other):
"""x.__le__(y) <=> x<=y <=> mx.nd.less_equal(x, y) """
return lesser_equal(self, other)
def __bool__(self):
raise ValueError("The truth value of an NDArray with more than one element is ambiguous.")
__nonzero__ = __bool__
def __getstate__(self):
handle = self.handle
this = {'handle' : None}
if handle is not None:
length = ctypes.c_size_t()
cptr = ctypes.POINTER(ctypes.c_char)()
check_call(_LIB.MXNDArraySaveRawBytes(self.handle,
ctypes.byref(length),
ctypes.byref(cptr)))
this['handle'] = ctypes2buffer(cptr, length.value)
return this
def __setstate__(self, state):
# pylint: disable=assigning-non-slot
handle = state['handle']
if handle is not None:
buf = handle
handle = NDArrayHandle()
ptr = (ctypes.c_char * len(buf)).from_buffer(buf)
length = ctypes.c_size_t(len(buf))
check_call(_LIB.MXNDArrayLoadFromRawBytes(ptr, length, ctypes.byref(handle)))
self.handle = handle
else:
self.handle = None
def __setitem__(self, key, value):
"""x.__setitem__(i, y) <=> x[i]=y
Set self[key] to value.
Parameters
----------
key : int, slice or tuple
The indexing key.
value : scalar, NDArray or numpy.ndarray
The value to set.
Examples
--------
>>> x = mx.nd.zeros((2,3))
>>> x[:] = 1
>>> x.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> x[:,1:2] = 2
>>> x.asnumpy()
array([[ 1., 2., 1.],
[ 1., 2., 1.]], dtype=float32)
>>> x[1:2,1:] = 3
>>> x.asnumpy()
array([[ 1., 2., 1.],
[ 1., 3., 3.]], dtype=float32)
>>> x[1:,0:2] = mx.nd.zeros((1,2))
>>> x.asnumpy()
array([[ 1., 2., 1.],
[ 0., 0., 3.]], dtype=float32)
>>> x[1,2] = 4
>>> x.asnumpy()
array([[ 1., 2., 1.],
[ 0., 0., 4.]], dtype=float32)
"""
# pylint: disable=too-many-branches
if not self.writable:
raise ValueError('Failed to assign to a readonly NDArray')
if isinstance(key, int):
sliced_arr = self._at(key)
sliced_arr[:] = value
return
if isinstance(key, py_slice):
if key.step is not None:
raise ValueError('NDArray only supports continuous slicing on axis 0')
if key.start is not None or key.stop is not None:
sliced_arr = self._slice(key.start, key.stop)
sliced_arr[:] = value
return
if isinstance(value, NDArray):
if value.handle is not self.handle:
value.copyto(self)
elif isinstance(value, numeric_types):
_internal._set_value(float(value), out=self)
elif isinstance(value, (np.ndarray, np.generic)):
self._sync_copyfrom(value)
else:
raise TypeError('type %s not supported' % str(type(value)))
if isinstance(key, tuple):
# multi-dimension indexing
my_shape = self.shape
assert len(key) == len(my_shape)
for slice_i in key:
assert isinstance(slice_i, (py_slice, int))
begin = [0 for _ in my_shape]
end = [x for x in my_shape]
for i, slice_i in enumerate(key):
if isinstance(slice_i, int):
assert slice_i < my_shape[i]
begin[i] = slice_i
end[i] = slice_i + 1
if isinstance(slice_i, py_slice):
# only support continuous slicing
assert slice_i.step is None
begin[i] = slice_i.start or 0
end[i] = slice_i.stop or my_shape[i]
assert begin[i] < end[i]
assert end[i] <= my_shape[i]
begin = tuple(begin)
end = tuple(end)
if isinstance(value, NDArray):
value = value.as_in_context(self.context)
_internal._crop_assign(self, value, out=self,
begin=begin, end=end)
elif isinstance(value, numeric_types):
_internal._crop_assign_scalar(self, out=self,
begin=begin, end=end,
scalar=value)
elif isinstance(value, (np.ndarray, np.generic)):
value = array(value, ctx=self.context)
_internal._crop_assign(self, value, out=self,
begin=begin, end=end)
else:
raise TypeError('type %s not supported' % str(type(value)))
# pylint: enable=too-many-branches
def __getitem__(self, key):
"""x.__getitem__(i) <=> x[i]
Returns a sliced view of this array.
Parameters
----------
key : int or slice
Indexing key.
Examples
--------
>>> x = mx.nd.arange(0,6).reshape((2,3))
>>> x.asnumpy()
array([[ 0., 1., 2.],
[ 3., 4., 5.]], dtype=float32)
>>> x[1].asnumpy()
array([ 3., 4., 5.], dtype=float32)
>>> y = x[0:1]
>>> y[:] = 2
>>> x.asnumpy()
array([[ 2., 2., 2.],
[ 3., 4., 5.]], dtype=float32)
"""
# multi-dimensional slicing is not supported yet
if isinstance(key, int):
if key > self.shape[0] - 1:
raise IndexError(
'index {} is out of bounds for axis 0 with size {}'.format(
key, self.shape[0]))
return self._at(key)
if isinstance(key, py_slice):
if key.step is not None:
raise ValueError('NDArray only supports continuous slicing on axis 0')
if key.start is not None or key.stop is not None:
return self._slice(key.start, key.stop)
else:
return self
if isinstance(key, tuple):
raise ValueError('Multi-dimension indexing is not supported')
def _sync_copyfrom(self, source_array):
"""Performs a synchronized copy from the `source_array` to the current array.
This is called through ``x[:] = source_array``, where the `source_array`
is a `numpy.ndarray` or array-like object.
This function blocks until all the pending read/write operations with respect
to the current `NDArray` are finished and carry out the copy operation to the
current NDArray.
Parameters
----------
source_array : array_like
The data source we would like to copy from.
Example
-------
>>> a = mx.nd.array([1, 2])
>>> a.asnumpy()
array([ 1., 2.], dtype=float32)
>>> a[:] = np.array([3, 4])
>>> a.asnumpy()
array([ 3., 4.], dtype=float32)
"""
if not isinstance(source_array, np.ndarray):
try:
source_array = np.array(source_array, dtype=self.dtype)
except:
raise TypeError('array must consist of array-like data, ' +
'type %s is not supported' % str(type(source_array)))
source_array = np.ascontiguousarray(source_array, dtype=self.dtype)
if source_array.shape != self.shape:
raise ValueError('Shape inconsistent: expected %s vs got %s'%(
str(self.shape), str(source_array.shape)))
check_call(_LIB.MXNDArraySyncCopyFromCPU(
self.handle,
source_array.ctypes.data_as(ctypes.c_void_p),
ctypes.c_size_t(source_array.size)))
def _slice(self, start, stop):
"""Returns a sliced NDArray that shares memory with the current one.
This is called through ``x[start:stop]``.
Parameters
----------
start : int
Starting inclusive index of slice in the first dim.
stop : int
Finishing exclusive index of slice in the first dim.
Returns
-------
`NDArray` sharing the memory with the current one sliced from
start to stop in the first dim.
Examples
--------
>>> a = mx.nd.array([[1,2], [3, 4], [5, 6], [7, 8]])
>>> a[1:2].asnumpy()
array([[ 3., 4.]], dtype=float32)
>>> a[1:1].asnumpy()
array([], shape=(0, 2), dtype=float32)
"""
handle = NDArrayHandle()
start = mx_uint(start) if start else mx_uint(0)
stop = mx_uint(stop) if stop else mx_uint(self.shape[0])
check_call(_LIB.MXNDArraySlice(
self.handle, start, stop, ctypes.byref(handle)))
return NDArray(handle=handle, writable=self.writable)
def _at(self, idx):
"""Returns a view of the array sliced at `idx` in the first dim.
This is called through ``x[idx]``.
Parameters
----------
idx : int
index for slicing the `NDArray` in the first dim.
Returns
-------
NDArray
`NDArray` sharing the memory with the current one sliced at `idx` in the first dim.
Examples
--------
>>> a = mx.nd.array([[1,2], [3, 4]])
>>> a[1].asnumpy()
array([ 3., 4.], dtype=float32)
>>> b = mx.nd.array([1, 2, 3, 4])
>>> b[0].asnumpy()
array([ 1.], dtype=float32)
"""
handle = NDArrayHandle()
idx = mx_uint(idx)
check_call(_LIB.MXNDArrayAt(
self.handle, idx, ctypes.byref(handle)))
return NDArray(handle=handle, writable=self.writable)
def reshape(self, shape):
"""Returns a **view** of this array with a new shape without altering any data.
Parameters
----------
shape : tuple of int
The new shape should not change the array size, namely
``np.prod(new_shape)`` should be equal to ``np.prod(self.shape)``.
One dimension can be -1. In this case, the value is inferred
from the length of the array and remaining dimensions.
Dimensions given as 0 in ``shape`` are copied from the original shape, i.e.
if x.shape == (3, 4, 5), x.reshape((0, 20)).shape will be (3, 20).
Returns
-------
NDArray
An array with desired shape that shares data with this array.
Examples
--------
>>> x = mx.nd.arange(0,6).reshape((2,3))
>>> x.asnumpy()
array([[ 0., 1., 2.],
[ 3., 4., 5.]], dtype=float32)
>>> y = x.reshape((3,2))
>>> y.asnumpy()
array([[ 0., 1.],
[ 2., 3.],
[ 4., 5.]], dtype=float32)
>>> y = x.reshape((3,-1))
>>> y.asnumpy()
array([[ 0., 1.],
[ 2., 3.],
[ 4., 5.]], dtype=float32)
>>> y[:] = -1
>>> x.asnumpy()
array([[-1., -1., -1.],
[-1., -1., -1.]], dtype=float32)
"""
handle = NDArrayHandle()
# Actual reshape
check_call(_LIB.MXNDArrayReshape(self.handle,
len(shape),
c_array(ctypes.c_int, shape),
ctypes.byref(handle)))
return NDArray(handle=handle, writable=self.writable)
# pylint: disable= undefined-variable
def broadcast_to(self, shape):
"""Broadcasts the input array to a new shape.
Broadcasting is only allowed on axes with size 1. The new shape cannot change
the number of dimensions.
For example, you could broadcast from shape (2, 1) to (2, 3), but not from
shape (2, 3) to (2, 3, 3).
Parameters
----------
shape : tuple of int
The shape of the desired array.
Returns
-------
NDArray
A NDArray with the desired shape that is not sharing data with this
array, even if the new shape is the same as ``self.shape``.
Examples
--------
>>> x = mx.nd.arange(0,3).reshape((1,3,1))
>>> x.asnumpy()
array([[[ 0.],
[ 1.],
[ 2.]]], dtype=float32)
>>> y = x.broadcast_to((2,3,3))
>>> y.asnumpy()
array([[[ 0., 0., 0.],
[ 1., 1., 1.],
[ 2., 2., 2.]],
<BLANKLINE>
[[ 0., 0., 0.],
[ 1., 1., 1.],
[ 2., 2., 2.]]], dtype=float32)
"""
cur_shape = self.shape
err_str = 'operands could not be broadcast together with remapped shapes' \
'[original->remapped]: {} and requested shape {}'.format(cur_shape, shape)
if len(shape) < len(cur_shape):
raise ValueError(err_str)
cur_shape = (1,) * (len(shape) - len(cur_shape)) + cur_shape
cur_shape_arr = np.array(cur_shape)
broadcasting_axes = np.nonzero(cur_shape_arr != np.array(shape))
if (cur_shape_arr[broadcasting_axes] != 1).any():
raise ValueError(err_str)
if cur_shape != self.shape:
return broadcast_to(self.reshape(cur_shape), shape=shape)
else:
return broadcast_to(self, shape=tuple(shape))
# pylint: enable= undefined-variable
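# Worked example of the shape handling in broadcast_to above: broadcasting a (3,)
# array to (2, 3) first left-pads the current shape with ones to (1, 3); the only
# differing axis has size 1, so the reshaped array is handed to the broadcast_to
# operator. Requesting (2, 4) instead raises ValueError, because the mismatching
# axis (3 vs 4) does not have size 1.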
def wait_to_read(self):
"""Waits until all previous write operations on the current array are finished.
This method guarantees that all previous write operations that pushed
into the backend engine for execution are actually finished.
Examples
--------
>>> import time
>>> tic = time.time()
>>> a = mx.nd.ones((1000,1000))
>>> b = mx.nd.dot(a, a)
>>> print(time.time() - tic) # doctest: +SKIP
0.003854036331176758
>>> b.wait_to_read()
>>> print(time.time() - tic) # doctest: +SKIP
0.0893700122833252
"""
check_call(_LIB.MXNDArrayWaitToRead(self.handle))
@property
def ndim(self):
"""Returns the number of dimensions of this array
Examples
--------
>>> x = mx.nd.array([1, 2, 3, 4])
>>> x.ndim
1
>>> x = mx.nd.array([[1, 2], [3, 4]])
>>> x.ndim
2
"""
return len(self.shape)
@property
def shape(self):
"""Tuple of array dimensions.
Examples
--------
>>> x = mx.nd.array([1, 2, 3, 4])
>>> x.shape
(4L,)
>>> y = mx.nd.zeros((2, 3, 4))
>>> y.shape
(2L, 3L, 4L)
"""
ndim = mx_uint()
pdata = ctypes.POINTER(mx_uint)()
check_call(_LIB.MXNDArrayGetShape(
self.handle, ctypes.byref(ndim), ctypes.byref(pdata)))
return tuple(pdata[:ndim.value])
@property
def size(self):
"""Number of elements in the array.
Equivalent to the product of the array’s dimensions.
Examples
--------
>>> import numpy as np
>>> x = mx.nd.zeros((3, 5, 2))
>>> x.size
30
>>> np.prod(x.shape)
30
"""
return np.prod(self.shape)
@property
def context(self):
"""Device context of the array.
Examples
--------
>>> x = mx.nd.array([1, 2, 3, 4])
>>> x.context
cpu(0)
>>> type(x.context)
<class 'mxnet.context.Context'>
>>> y = mx.nd.zeros((2,3), mx.gpu(0))
>>> y.context
gpu(0)
"""
dev_typeid = ctypes.c_int()
dev_id = ctypes.c_int()
check_call(_LIB.MXNDArrayGetContext(
self.handle, ctypes.byref(dev_typeid), ctypes.byref(dev_id)))
return Context(Context.devtype2str[dev_typeid.value], dev_id.value)
@property
def dtype(self):
"""Data-type of the array’s elements.
Returns
-------
numpy.dtype
This NDArray's data type.
Examples
--------
>>> x = mx.nd.zeros((2,3))
>>> x.dtype
<type 'numpy.float32'>
>>> y = mx.nd.zeros((2,3), dtype='int32')
>>> y.dtype
<type 'numpy.int32'>
"""
mx_dtype = ctypes.c_int()
check_call(_LIB.MXNDArrayGetDType(
self.handle, ctypes.byref(mx_dtype)))
return _DTYPE_MX_TO_NP[mx_dtype.value]
@property
# pylint: disable= invalid-name, undefined-variable
def T(self):
"""Returns a copy of the array with axes transposed.
Equivalent to ``mx.nd.transpose(self)`` except that
self is returned if ``self.ndim < 2``.
Unlike ``numpy.ndarray.T``, this function returns a copy
rather than a view of the array unless ``self.ndim < 2``.
Examples
--------
>>> x = mx.nd.arange(0,6).reshape((2,3))
>>> x.asnumpy()
array([[ 0., 1., 2.],
[ 3., 4., 5.]], dtype=float32)
>>> x.T.asnumpy()
array([[ 0., 3.],
[ 1., 4.],
[ 2., 5.]], dtype=float32)
"""
if len(self.shape) < 2:
return self
return transpose(self)
# pylint: enable= invalid-name, undefined-variable
@property
def _fresh_grad(self):
"""Whether this array's corresponding gradient array
(registered via `autograd.mark_variables`) has been
updated by `autograd.backward` since last reset.
`_fresh_grad` needs to be manually set to False
after consuming gradient (usually after updating this
array).
"""
out = ctypes.c_int()
check_call(_LIB.MXNDArrayGetGradState(self.handle, ctypes.byref(out)))
return out.value
@_fresh_grad.setter
def _fresh_grad(self, state):
check_call(_LIB.MXNDArraySetGradState(self.handle, ctypes.c_int(state)))
def asnumpy(self):
"""Returns a ``numpy.ndarray`` object with value copied from this array.
Examples
--------
>>> x = mx.nd.ones((2,3))
>>> y = x.asnumpy()
>>> type(y)
<type 'numpy.ndarray'>
>>> y
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> z = mx.nd.ones((2,3), dtype='int32')
>>> z.asnumpy()
array([[1, 1, 1],
[1, 1, 1]], dtype=int32)
"""
data = np.empty(self.shape, dtype=self.dtype)
check_call(_LIB.MXNDArraySyncCopyToCPU(
self.handle,
data.ctypes.data_as(ctypes.c_void_p),
ctypes.c_size_t(data.size)))
return data
def asscalar(self):
"""Returns a scalar whose value is copied from this array.
This function is equivalent to ``self.asnumpy()[0]``. This NDArray must have shape (1,).
Examples
--------
>>> x = mx.nd.ones((1,), dtype='int32')
>>> x.asscalar()
1
>>> type(x.asscalar())
<type 'numpy.int32'>
"""
if self.shape != (1,):
raise ValueError("The current array is not a scalar")
return self.asnumpy()[0]
def astype(self, dtype):
"""Returns a copy of the array after casting to a specified type.
Parameters
----------
dtype : numpy.dtype or str
The type of the returned array.
Examples
--------
>>> x = mx.nd.zeros((2,3), dtype='float32')
>>> y = x.astype('int32')
>>> y.dtype
<type 'numpy.int32'>
"""
res = empty(self.shape, ctx=self.context, dtype=dtype)
self.copyto(res)
return res
def copyto(self, other):
"""Copies the value of this array to another array.
If ``other`` is a ``NDArray`` object, then ``other.shape`` and
``self.shape`` should be the same. This function copies the value from
``self`` to ``other``.
If ``other`` is a context, a new ``NDArray`` will be first created on
the target context, and the value of ``self`` is copied.
Parameters
----------
other : NDArray or Context
The destination array or context.
Returns
-------
NDArray
The copied array. If ``other`` is an ``NDArray``, then the return value
and ``other`` will point to the same ``NDArray``.
Examples
--------
>>> x = mx.nd.ones((2,3))
>>> y = mx.nd.zeros((2,3), mx.gpu(0))
>>> z = x.copyto(y)
>>> z is y
True
>>> y.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> y.copyto(mx.gpu(0))
<NDArray 2x3 @gpu(0)>
"""
if isinstance(other, NDArray):
if other.handle is self.handle:
warnings.warn('You are attempting to copy an array to itself', RuntimeWarning)
return
return _internal._copyto(self, out=other)
elif isinstance(other, Context):
hret = NDArray(_new_alloc_handle(self.shape, other, True, self.dtype))
return _internal._copyto(self, out=hret)
else:
raise TypeError('copyto does not support type ' + str(type(other)))
def copy(self):
"""Makes a copy of this ``NDArray``, keeping the same context.
Returns
-------
NDArray
The copied array
Examples
--------
>>> x = mx.nd.ones((2,3))
>>> y = x.copy()
>>> y.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
"""
return self.copyto(self.context)
def as_in_context(self, context):
"""Returns an array on the target device with the same value as this array.
If the target context is the same as ``self.context``, then ``self`` is
returned. Otherwise, a copy is made.
Parameters
----------
context : Context
The target context.
Returns
-------
NDArray
The target array.
Examples
--------
>>> x = mx.nd.ones((2,3))
>>> y = x.as_in_context(mx.cpu())
>>> y is x
True
>>> z = x.as_in_context(mx.gpu(0))
>>> z is x
False
"""
if self.context == context:
return self
return self.copyto(context)
def detach(self):
"""Returns a new NDArray, detached from the current graph."""
hdl = NDArrayHandle()
check_call(_LIB.MXNDArrayDetach(self.handle, ctypes.byref(hdl)))
return NDArray(hdl)
def backward(self, out_grad=None, retain_graph=False):
"""Compute the gradients of this NDArray w.r.t variables.
Parameters
----------
out_grad: list of NDArray or None
"""
if out_grad is None:
ograd_handles = [NDArrayHandle(0)]
else:
ograd_handles = [out_grad.handle]
check_call(_LIB.MXAutogradBackward(
1, c_array(NDArrayHandle, [self.handle]),
c_array(NDArrayHandle, ograd_handles),
ctypes.c_int(retain_graph)))
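# Hedged usage sketch (not part of the library; exact autograd module paths differ
# across MXNet versions and are assumptions here): after marking an array as a
# variable via the autograd API referenced in `_fresh_grad` (e.g.
# autograd.mark_variables) and recording operations, calling `loss.backward()`
# fills in the corresponding gradient array, which can then be read like any
# other NDArray.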
def onehot_encode(indices, out):
"""One-hot encoding indices into matrix out.
.. note:: `onehot_encode` is deprecated. Use `one_hot` instead.
"""
# pylint: disable= no-member, protected-access
return _internal._onehot_encode(indices, out, out=out)
# pylint: enable= no-member, protected-access
def empty(shape, ctx=None, dtype=mx_real_t):
"""Returns a new array of given shape and type, without initializing entries.
Parameters
----------
shape : int or tuple of int
The shape of the empty array.
ctx : Context, optional
An optional device context (default is the current default context).
dtype : str or numpy.dtype, optional
An optional value type (default is `float32`).
Returns
-------
NDArray
A created array.
Examples
--------
>>> mx.nd.empty(1)
<NDArray 1 @cpu(0)>
>>> mx.nd.empty((1,2), mx.gpu(0))
<NDArray 1x2 @gpu(0)>
>>> mx.nd.empty((1,2), mx.gpu(0), 'float16')
<NDArray 1x2 @gpu(0)>
"""
if isinstance(shape, int):
shape = (shape, )
if ctx is None:
ctx = Context.default_ctx
return NDArray(handle=_new_alloc_handle(shape, ctx, False, dtype))
def zeros(shape, ctx=None, dtype=mx_real_t, **kwargs):
"""Returns a new array filled with all zeros, with the given shape and type.
Parameters
----------
shape : int or tuple of int
The shape of the empty array.
ctx : Context, optional
An optional device context (default is the current default context).
dtype : str or numpy.dtype, optional
An optional value type (default is `float32`).
out : NDArray, optional
The output NDArray (default is `None`).
Returns
-------
NDArray
A created array
Examples
--------
>>> mx.nd.zeros(1).asnumpy()
array([ 0.], dtype=float32)
>>> mx.nd.zeros((1,2), mx.gpu(0))
<NDArray 1x2 @gpu(0)>
>>> mx.nd.zeros((1,2), mx.gpu(0), 'float16').asnumpy()
array([[ 0., 0.]], dtype=float16)
"""
# pylint: disable= unused-argument
if ctx is None:
ctx = Context.default_ctx
# pylint: disable= no-member, protected-access
return _internal._zeros(shape=shape, ctx=ctx, dtype=dtype, **kwargs)
# pylint: enable= no-member, protected-access
def ones(shape, ctx=None, dtype=mx_real_t, **kwargs):
"""Returns a new array filled with all ones, with the given shape and type.
Parameters
----------
shape : int or tuple of int or list of int
The shape of the empty array.
ctx : Context, optional
An optional device context.
Defaults to the current default context (``mxnet.Context.default_ctx``).
dtype : str or numpy.dtype, optional
An optional value type (default is `float32`).
out : NDArray, optional
The output NDArray (default is `None`).
Returns
-------
NDArray
A new array of the specified shape filled with all ones.
Examples
--------
>>> mx.nd.ones(1).asnumpy()
array([ 1.], dtype=float32)
>>> mx.nd.ones((1,2), mx.gpu(0))
<NDArray 1x2 @gpu(0)>
>>> mx.nd.ones((1,2), dtype='float16').asnumpy()
array([[ 1., 1.]], dtype=float16)
"""
# pylint: disable= unused-argument
if ctx is None:
ctx = Context.default_ctx
# pylint: disable= no-member, protected-access
return _internal._ones(shape=shape, ctx=ctx, dtype=dtype, **kwargs)
# pylint: enable= no-member, protected-access
def full(shape, val, ctx=None, dtype=mx_real_t, out=None):
"""Returns a new array of given shape and type, filled with the given value `val`.
Parameters
--------
shape : int or tuple of int
The shape of the new array.
val : scalar
Fill value.
ctx : Context, optional
Device context (default is the current default context).
dtype : `str` or `numpy.dtype`, optional
The data type of the returned `NDArray`. The default datatype is `float32`.
out : NDArray, optional
The output NDArray (default is `None`).
Returns
-------
NDArray
`NDArray` filled with `val`, with the given shape, ctx, and dtype.
Examples
--------
>>> mx.nd.full(1, 2.0).asnumpy()
array([ 2.], dtype=float32)
>>> mx.nd.full((1, 2), 2.0, mx.gpu(0))
<NDArray 1x2 @gpu(0)>
>>> mx.nd.full((1, 2), 2.0, dtype='float16').asnumpy()
array([[ 2., 2.]], dtype=float16)
"""
out = empty(shape, ctx, dtype) if out is None else out
out[:] = val
return out
def array(source_array, ctx=None, dtype=None):
"""Creates an array from any object exposing the array interface.
Parameters
----------
source_array : array_like
An object exposing the array interface, an object whose `__array__`
method returns an array, or any (nested) sequence.
ctx : Context, optional
Device context (default is the current default context).
dtype : str or numpy.dtype, optional
The data type of the output array. The default dtype is ``source_array.dtype``
if `source_array` is an `NDArray`, `float32` otherwise.
Returns
-------
NDArray
An `NDArray` with the same contents as the `source_array`.
Examples
--------
>>> import numpy as np
>>> mx.nd.array([1, 2, 3])
<NDArray 3 @cpu(0)>
>>> mx.nd.array([[1, 2], [3, 4]])
<NDArray 2x2 @cpu(0)>
>>> mx.nd.array(np.zeros((3, 2)))
<NDArray 3x2 @cpu(0)>
>>> mx.nd.array(np.zeros((3, 2)), mx.gpu(0))
<NDArray 3x2 @gpu(0)>
"""
if isinstance(source_array, NDArray):
dtype = source_array.dtype if dtype is None else dtype
else:
dtype = mx_real_t if dtype is None else dtype
if not isinstance(source_array, np.ndarray):
try:
source_array = np.array(source_array, dtype=dtype)
except:
raise TypeError('source_array must be array like object')
arr = empty(source_array.shape, ctx, dtype)
arr[:] = source_array
return arr
def moveaxis(tensor, source, destination):
"""Moves the `source` axis into the `destination` position
while leaving the other axes in their original order.
Parameters
----------
tensor : mx.nd.array
The array whose axes should be reordered.
source : int
Original position of the axes to move.
destination : int
Destination position for each of the original axes.
Returns
-------
result : mx.nd.array
Array with moved axes.
Examples
--------
>>> X = mx.nd.array([[1, 2, 3], [4, 5, 6]])
>>> mx.nd.moveaxis(X, 0, 1).shape
(3L, 2L)
"""
axes = list(range(tensor.ndim))
try:
axes.pop(source)
except IndexError:
raise ValueError('Source should verify 0 <= source < tensor.ndim. '
'Got %d' % source)
try:
axes.insert(destination, source)
except IndexError:
raise ValueError('Destination should verify 0 <= destination < tensor.ndim. '
'Got %d' % destination)
return transpose(tensor, axes)
# pylint: disable= no-member, protected-access, too-many-arguments
def arange(start, stop=None, step=1.0, repeat=1, ctx=None, dtype=mx_real_t):
"""Returns evenly spaced values within a given interval.
Values are generated within the half-open interval [`start`, `stop`). In other
words, the interval includes `start` but excludes `stop`. The function is
similar to the built-in Python function `range` and to `numpy.arange`,
but returns an `NDArray`.
Parameters
----------
start : float, optional
Start of interval. The default start value is 0.
stop : float
End of interval.
step : float, optional
Spacing between values. The default step size is 1.
repeat : int, optional
Number of times to repeat each element. The default repeat count is 1.
ctx : Context, optional
Device context. Default context is the current default context.
dtype : str or numpy.dtype, optional
The data type of the `NDArray`. The default datatype is `np.float32`.
Returns
-------
NDArray
`NDArray` of evenly spaced values in the specified range.
Examples
--------
>>> mx.nd.arange(3).asnumpy()
array([ 0., 1., 2.], dtype=float32)
>>> mx.nd.arange(2, 6).asnumpy()
array([ 2., 3., 4., 5.], dtype=float32)
>>> mx.nd.arange(2, 6, step=2).asnumpy()
array([ 2., 4.], dtype=float32)
>>> mx.nd.arange(2, 6, step=1.5, repeat=2).asnumpy()
array([ 2. , 2. , 3.5, 3.5, 5. , 5. ], dtype=float32)
>>> mx.nd.arange(2, 6, step=2, repeat=3, dtype='int32').asnumpy()
array([2, 2, 2, 4, 4, 4], dtype=int32)
"""
if ctx is None:
ctx = Context.default_ctx
return _internal._arange(start=start, stop=stop, step=step, repeat=repeat,
dtype=dtype, ctx=str(ctx))
# pylint: enable= no-member, protected-access, too-many-arguments
#pylint: disable= too-many-arguments, no-member, protected-access
def _ufunc_helper(lhs, rhs, fn_array, fn_scalar, lfn_scalar, rfn_scalar=None):
""" Helper function for element-wise operation.
The function will perform numpy-like broadcasting if needed and call different functions.
Parameters
--------
lhs : NDArray or numeric value
Left-hand side operand.
rhs : NDArray or numeric value
Right-hand operand,
fn_array : function
Function to be called if both lhs and rhs are of ``NDArray`` type.
fn_scalar : function
Function to be called if both lhs and rhs are numeric values.
lfn_scalar : function
Function to be called if lhs is ``NDArray`` while rhs is numeric value
rfn_scalar : function
Function to be called if lhs is numeric value while rhs is ``NDArray``;
if none is provided, then the function is commutative, so rfn_scalar is equal to lfn_scalar
Returns
--------
NDArray
result array
"""
if isinstance(lhs, numeric_types):
if isinstance(rhs, numeric_types):
return fn_scalar(lhs, rhs)
else:
if rfn_scalar is None:
# commutative function
return lfn_scalar(rhs, float(lhs))
else:
return rfn_scalar(rhs, float(lhs))
elif isinstance(rhs, numeric_types):
return lfn_scalar(lhs, float(rhs))
elif isinstance(rhs, NDArray):
return fn_array(lhs, rhs)
else:
raise TypeError('type %s not supported' % str(type(rhs)))
#pylint: enable= too-many-arguments, no-member, protected-access
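# Dispatch sketch for _ufunc_helper, using add() below as an illustration
# (x and y denote NDArrays):
#   add(3, 4)  -> fn_scalar(3, 4)       i.e. operator.add        -> 7
#   add(3, x)  -> lfn_scalar(x, 3.0)    rfn_scalar is None, so the commutative
#                                       path reuses lfn_scalar
#   add(x, 3)  -> lfn_scalar(x, 3.0)    i.e. _internal._plus_scalar
#   add(x, y)  -> fn_array(x, y)        i.e. broadcast_add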
def add(lhs, rhs):
"""Returns element-wise sum of the input arrays with broadcasting.
Equivalent to ``lhs + rhs``, ``mx.nd.broadcast_add(lhs, rhs)`` and
``mx.nd.broadcast_plus(lhs, rhs)``.
.. note::
If the corresponding dimensions of two arrays have the same size or one of them has size 1,
then the arrays are broadcastable to a common shape.
Parameters
----------
lhs : scalar or array
First array to be added.
rhs : scalar or array
Second array to be added.
If ``lhs.shape != rhs.shape``, they must be
broadcastable to a common shape.
Returns
-------
NDArray
The element-wise sum of the input arrays.
Examples
--------
>>> x = mx.nd.ones((2,3))
>>> y = mx.nd.arange(2).reshape((2,1))
>>> z = mx.nd.arange(2).reshape((1,2))
>>> x.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> y.asnumpy()
array([[ 0.],
[ 1.]], dtype=float32)
>>> z.asnumpy()
array([[ 0., 1.]], dtype=float32)
>>> (x+2).asnumpy()
array([[ 3., 3., 3.],
[ 3., 3., 3.]], dtype=float32)
>>> (x+y).asnumpy()
array([[ 1., 1., 1.],
[ 2., 2., 2.]], dtype=float32)
>>> mx.nd.add(x,y).asnumpy()
array([[ 1., 1., 1.],
[ 2., 2., 2.]], dtype=float32)
>>> (z + y).asnumpy()
array([[ 0., 1.],
[ 1., 2.]], dtype=float32)
"""
# pylint: disable= no-member, protected-access
return _ufunc_helper(
lhs,
rhs,
broadcast_add,
operator.add,
_internal._plus_scalar,
None)
# pylint: enable= no-member, protected-access
def subtract(lhs, rhs):
"""Returns element-wise difference of the input arrays with broadcasting.
Equivalent to ``lhs - rhs``, ``mx.nd.broadcast_sub(lhs, rhs)`` and
``mx.nd.broadcast_minus(lhs, rhs)``.
.. note::
If the corresponding dimensions of two arrays have the same size or one of them has size 1,
then the arrays are broadcastable to a common shape.
Parameters
----------
lhs : scalar or array
First array to be subtracted.
rhs : scalar or array
Second array to be subtracted.
If ``lhs.shape != rhs.shape``, they must be
broadcastable to a common shape.
Returns
-------
NDArray
The element-wise difference of the input arrays.
Examples
--------
>>> x = mx.nd.ones((2,3))
>>> y = mx.nd.arange(2).reshape((2,1))
>>> z = mx.nd.arange(2).reshape((1,2))
>>> x.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> y.asnumpy()
array([[ 0.],
[ 1.]], dtype=float32)
>>> z.asnumpy()
array([[ 0., 1.]], dtype=float32)
>>> (x-2).asnumpy()
array([[-1., -1., -1.],
[-1., -1., -1.]], dtype=float32)
>>> (x-y).asnumpy()
array([[ 1., 1., 1.],
[ 0., 0., 0.]], dtype=float32)
>>> mx.nd.subtract(x,y).asnumpy()
array([[ 1., 1., 1.],
[ 0., 0., 0.]], dtype=float32)
>>> (z-y).asnumpy()
array([[ 0., 1.],
[-1., 0.]], dtype=float32)
"""
# pylint: disable= no-member, protected-access
return _ufunc_helper(
lhs,
rhs,
broadcast_sub,
operator.sub,
_internal._minus_scalar,
_internal._rminus_scalar)
# pylint: enable= no-member, protected-access
def multiply(lhs, rhs):
"""Returns element-wise product of the input arrays with broadcasting.
Equivalent to ``lhs * rhs`` and ``mx.nd.broadcast_mul(lhs, rhs)``.
.. note::
If the corresponding dimensions of two arrays have the same size or one of them has size 1,
then the arrays are broadcastable to a common shape.
Parameters
----------
lhs : scalar or array
First array to be multiplied.
rhs : scalar or array
Second array to be multiplied.
If ``lhs.shape != rhs.shape``, they must be
broadcastable to a common shape.
Returns
-------
NDArray
The element-wise multiplication of the input arrays.
Examples
--------
>>> x = mx.nd.ones((2,3))
>>> y = mx.nd.arange(2).reshape((2,1))
>>> z = mx.nd.arange(2).reshape((1,2))
>>> x.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> y.asnumpy()
array([[ 0.],
[ 1.]], dtype=float32)
>>> z.asnumpy()
array([[ 0., 1.]], dtype=float32)
>>> (x*2).asnumpy()
array([[ 2., 2., 2.],
[ 2., 2., 2.]], dtype=float32)
>>> (x*y).asnumpy()
array([[ 0., 0., 0.],
[ 1., 1., 1.]], dtype=float32)
>>> mx.nd.multiply(x, y).asnumpy()
array([[ 0., 0., 0.],
[ 1., 1., 1.]], dtype=float32)
>>> (z*y).asnumpy()
array([[ 0., 0.],
[ 0., 1.]], dtype=float32)
"""
# pylint: disable= no-member, protected-access
return _ufunc_helper(
lhs,
rhs,
broadcast_mul,
operator.mul,
_internal._mul_scalar,
None)
# pylint: enable= no-member, protected-access
def divide(lhs, rhs):
"""Returns element-wise division of the input arrays with broadcasting.
Equivalent to ``lhs / rhs`` and ``mx.nd.broadcast_div(lhs, rhs)``.
.. note::
If the corresponding dimensions of two arrays have the same size or one of them has size 1,
then the arrays are broadcastable to a common shape.
Parameters
----------
lhs : scalar or array
First array in division.
rhs : scalar or array
Second array in division.
The arrays to be divided. If ``lhs.shape != rhs.shape``, they must be
broadcastable to a common shape.
Returns
-------
NDArray
The element-wise division of the input arrays.
Examples
--------
>>> x = mx.nd.ones((2,3))*6
>>> y = mx.nd.ones((2,1))*2
>>> x.asnumpy()
array([[ 6., 6., 6.],
[ 6., 6., 6.]], dtype=float32)
>>> y.asnumpy()
array([[ 2.],
[ 2.]], dtype=float32)
>>> x/2
<NDArray 2x3 @cpu(0)>
>>> (x/3).asnumpy()
array([[ 2., 2., 2.],
[ 2., 2., 2.]], dtype=float32)
>>> (x/y).asnumpy()
array([[ 3., 3., 3.],
[ 3., 3., 3.]], dtype=float32)
>>> mx.nd.divide(x,y).asnumpy()
array([[ 3., 3., 3.],
[ 3., 3., 3.]], dtype=float32)
"""
# pylint: disable= no-member, protected-access
return _ufunc_helper(
lhs,
rhs,
broadcast_div,
operator.truediv,
_internal._div_scalar,
_internal._rdiv_scalar)
# pylint: enable= no-member, protected-access
def modulo(lhs, rhs):
"""Returns element-wise modulo of the input arrays with broadcasting.
Equivalent to ``lhs % rhs`` and ``mx.nd.broadcast_mod(lhs, rhs)``.
.. note::
If the corresponding dimensions of two arrays have the same size or one of them has size 1,
then the arrays are broadcastable to a common shape.
Parameters
----------
lhs : scalar or array
First array in modulo.
rhs : scalar or array
Second array in modulo.
The arrays to be taken modulo. If ``lhs.shape != rhs.shape``, they must be
broadcastable to a common shape.
Returns
-------
NDArray
The element-wise modulo of the input arrays.
Examples
--------
>>> x = mx.nd.ones((2,3))*6
>>> y = mx.nd.ones((2,1))*4
>>> x.asnumpy()
array([[ 6., 6., 6.],
[ 6., 6., 6.]], dtype=float32)
>>> y.asnumpy()
array([[ 4.],
[ 4.]], dtype=float32)
>>> x%5
<NDArray 2x3 @cpu(0)>
>>> (x%5).asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> (x%y).asnumpy()
array([[ 2., 2., 2.],
[ 2., 2., 2.]], dtype=float32)
>>> mx.nd.modulo(x,y).asnumpy()
array([[ 2., 2., 2.],
[ 2., 2., 2.]], dtype=float32)
"""
# pylint: disable= no-member, protected-access
return _ufunc_helper(
lhs,
rhs,
broadcast_mod,
operator.mod,
_internal._mod_scalar,
_internal._rmod_scalar)
# pylint: enable= no-member, protected-access
def power(base, exp):
"""Returns result of first array elements raised to powers from second array, element-wise
with broadcasting.
Equivalent to ``base ** exp`` and ``mx.nd.broadcast_power(lhs, rhs)``.
.. note::
If the corresponding dimensions of two arrays have the same size or one of them has size 1,
then the arrays are broadcastable to a common shape.
Parameters
----------
base : scalar or NDArray
The base array
exp : scalar or NDArray
The exponent array. If ``base.shape != exp.shape``, they must be
broadcastable to a common shape.
Returns
--------
NDArray
        The elements of ``base`` raised to the corresponding exponents in ``exp``.
Examples
--------
>>> x = mx.nd.ones((2,3))*2
>>> y = mx.nd.arange(1,3).reshape((2,1))
>>> z = mx.nd.arange(1,3).reshape((2,1))
>>> x.asnumpy()
array([[ 2., 2., 2.],
[ 2., 2., 2.]], dtype=float32)
>>> y.asnumpy()
array([[ 1.],
[ 2.]], dtype=float32)
>>> z.asnumpy()
array([[ 1.],
[ 2.]], dtype=float32)
>>> (x**2).asnumpy()
array([[ 4., 4., 4.],
[ 4., 4., 4.]], dtype=float32)
>>> (x**y).asnumpy()
array([[ 2., 2., 2.],
[ 4., 4., 4.]], dtype=float32)
>>> mx.nd.power(x,y).asnumpy()
array([[ 2., 2., 2.],
[ 4., 4., 4.]], dtype=float32)
>>> (z**y).asnumpy()
array([[ 1.],
[ 4.]], dtype=float32)
"""
# pylint: disable= no-member, protected-access
return _ufunc_helper(
base,
exp,
broadcast_power,
operator.pow,
_internal._power_scalar,
_internal._rpower_scalar)
# pylint: enable= no-member, protected-access
def maximum(lhs, rhs):
"""Returns element-wise maximum of the input arrays with broadcasting.
Equivalent to ``mx.nd.broadcast_maximum(lhs, rhs)``.
.. note::
If the corresponding dimensions of two arrays have the same size or one of them has size 1,
then the arrays are broadcastable to a common shape.
Parameters
----------
lhs : scalar or array
First array to be compared.
rhs : scalar or array
Second array to be compared. If ``lhs.shape != rhs.shape``, they must be
broadcastable to a common shape.
Returns
-------
NDArray
The element-wise maximum of the input arrays.
Examples
--------
>>> x = mx.nd.ones((2,3))
>>> y = mx.nd.arange(2).reshape((2,1))
>>> z = mx.nd.arange(2).reshape((1,2))
>>> x.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> y.asnumpy()
array([[ 0.],
[ 1.]], dtype=float32)
>>> z.asnumpy()
array([[ 0., 1.]], dtype=float32)
>>> mx.nd.maximum(x, 2).asnumpy()
array([[ 2., 2., 2.],
[ 2., 2., 2.]], dtype=float32)
>>> mx.nd.maximum(x, y).asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> mx.nd.maximum(y, z).asnumpy()
array([[ 0., 1.],
[ 1., 1.]], dtype=float32)
"""
# pylint: disable= no-member, protected-access
return _ufunc_helper(
lhs,
rhs,
broadcast_maximum,
lambda x, y: x if x > y else y,
_internal._maximum_scalar,
None)
# pylint: enable= no-member, protected-access
def minimum(lhs, rhs):
"""Returns element-wise minimum of the input arrays with broadcasting.
Equivalent to ``mx.nd.broadcast_minimum(lhs, rhs)``.
.. note::
If the corresponding dimensions of two arrays have the same size or one of them has size 1,
then the arrays are broadcastable to a common shape.
Parameters
----------
lhs : scalar or array
First array to be compared.
rhs : scalar or array
Second array to be compared. If ``lhs.shape != rhs.shape``, they must be
broadcastable to a common shape.
Returns
-------
NDArray
The element-wise minimum of the input arrays.
Examples
--------
>>> x = mx.nd.ones((2,3))
>>> y = mx.nd.arange(2).reshape((2,1))
>>> z = mx.nd.arange(2).reshape((1,2))
>>> x.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> y.asnumpy()
array([[ 0.],
[ 1.]], dtype=float32)
>>> z.asnumpy()
array([[ 0., 1.]], dtype=float32)
>>> mx.nd.minimum(x, 2).asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> mx.nd.minimum(x, y).asnumpy()
array([[ 0., 0., 0.],
[ 1., 1., 1.]], dtype=float32)
>>> mx.nd.minimum(z, y).asnumpy()
array([[ 0., 0.],
[ 0., 1.]], dtype=float32)
"""
# pylint: disable= no-member, protected-access
return _ufunc_helper(
lhs,
rhs,
broadcast_minimum,
lambda x, y: x if x < y else y,
_internal._minimum_scalar,
None)
# pylint: enable= no-member, protected-access
def equal(lhs, rhs):
"""Returns the result of element-wise **equal to** (==) comparison operation with
broadcasting.
    For each element in the input arrays, return 1 (true) if the corresponding elements
    are equal, otherwise return 0 (false).
Equivalent to ``lhs == rhs`` and ``mx.nd.broadcast_equal(lhs, rhs)``.
.. note::
If the corresponding dimensions of two arrays have the same size or one of them has size 1,
then the arrays are broadcastable to a common shape.
Parameters
----------
lhs : scalar or array
First array to be compared.
rhs : scalar or array
Second array to be compared. If ``lhs.shape != rhs.shape``, they must be
broadcastable to a common shape.
Returns
-------
NDArray
Output array of boolean values.
Examples
--------
>>> x = mx.nd.ones((2,3))
>>> y = mx.nd.arange(2).reshape((2,1))
>>> z = mx.nd.arange(2).reshape((1,2))
>>> x.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> y.asnumpy()
array([[ 0.],
[ 1.]], dtype=float32)
>>> z.asnumpy()
array([[ 0., 1.]], dtype=float32)
>>> (x == 1).asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> (x == y).asnumpy()
array([[ 0., 0., 0.],
[ 1., 1., 1.]], dtype=float32)
>>> mx.nd.equal(x,y).asnumpy()
array([[ 0., 0., 0.],
[ 1., 1., 1.]], dtype=float32)
>>> (z == y).asnumpy()
array([[ 1., 0.],
[ 0., 1.]], dtype=float32)
"""
# pylint: disable= no-member, protected-access
return _ufunc_helper(
lhs,
rhs,
broadcast_equal,
lambda x, y: 1 if x == y else 0,
_internal._equal_scalar,
None)
# pylint: enable= no-member, protected-access
def not_equal(lhs, rhs):
"""Returns the result of element-wise **not equal to** (!=) comparison operation
with broadcasting.
    For each element in the input arrays, return 1 (true) if the corresponding elements
    differ, otherwise return 0 (false).
Equivalent to ``lhs != rhs`` and ``mx.nd.broadcast_not_equal(lhs, rhs)``.
.. note::
If the corresponding dimensions of two arrays have the same size or one of them has size 1,
then the arrays are broadcastable to a common shape.
Parameters
----------
lhs : scalar or array
First array to be compared.
rhs : scalar or array
Second array to be compared. If ``lhs.shape != rhs.shape``, they must be
broadcastable to a common shape.
Returns
-------
NDArray
Output array of boolean values.
Examples
--------
>>> x = mx.nd.ones((2,3))
>>> y = mx.nd.arange(2).reshape((2,1))
>>> z = mx.nd.arange(2).reshape((1,2))
>>> x.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> y.asnumpy()
array([[ 0.],
[ 1.]], dtype=float32)
>>> z.asnumpy()
array([[ 0., 1.]], dtype=float32)
>>> (z == y).asnumpy()
array([[ 1., 0.],
[ 0., 1.]], dtype=float32)
>>> (x != 1).asnumpy()
array([[ 0., 0., 0.],
[ 0., 0., 0.]], dtype=float32)
>>> (x != y).asnumpy()
array([[ 1., 1., 1.],
[ 0., 0., 0.]], dtype=float32)
>>> mx.nd.not_equal(x, y).asnumpy()
array([[ 1., 1., 1.],
[ 0., 0., 0.]], dtype=float32)
>>> (z != y).asnumpy()
array([[ 0., 1.],
[ 1., 0.]], dtype=float32)
"""
# pylint: disable= no-member, protected-access
return _ufunc_helper(
lhs,
rhs,
broadcast_not_equal,
lambda x, y: 1 if x != y else 0,
_internal._not_equal_scalar,
None)
# pylint: enable= no-member, protected-access
def greater(lhs, rhs):
"""Returns the result of element-wise **greater than** (>) comparison operation
with broadcasting.
    For each element in the input arrays, return 1 (true) if the lhs element is greater
    than the rhs element, otherwise return 0 (false).
Equivalent to ``lhs > rhs`` and ``mx.nd.broadcast_greater(lhs, rhs)``.
.. note::
If the corresponding dimensions of two arrays have the same size or one of them has size 1,
then the arrays are broadcastable to a common shape.
Parameters
----------
lhs : scalar or array
First array to be compared.
rhs : scalar or array
Second array to be compared. If ``lhs.shape != rhs.shape``, they must be
broadcastable to a common shape.
Returns
-------
NDArray
Output array of boolean values.
Examples
--------
>>> x = mx.nd.ones((2,3))
>>> y = mx.nd.arange(2).reshape((2,1))
>>> z = mx.nd.arange(2).reshape((1,2))
>>> x.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> y.asnumpy()
array([[ 0.],
[ 1.]], dtype=float32)
>>> z.asnumpy()
array([[ 0., 1.]], dtype=float32)
>>> (x > 1).asnumpy()
array([[ 0., 0., 0.],
[ 0., 0., 0.]], dtype=float32)
>>> (x > y).asnumpy()
array([[ 1., 1., 1.],
[ 0., 0., 0.]], dtype=float32)
>>> mx.nd.greater(x, y).asnumpy()
array([[ 1., 1., 1.],
[ 0., 0., 0.]], dtype=float32)
>>> (z > y).asnumpy()
array([[ 0., 1.],
[ 0., 0.]], dtype=float32)
"""
# pylint: disable= no-member, protected-access
return _ufunc_helper(
lhs,
rhs,
broadcast_greater,
lambda x, y: 1 if x > y else 0,
_internal._greater_scalar,
_internal._lesser_scalar)
# pylint: enable= no-member, protected-access
def greater_equal(lhs, rhs):
"""Returns the result of element-wise **greater than or equal to** (>=) comparison
operation with broadcasting.
    For each element in the input arrays, return 1 (true) if the lhs element is greater
    than or equal to the rhs element, otherwise return 0 (false).
Equivalent to ``lhs >= rhs`` and ``mx.nd.broadcast_greater_equal(lhs, rhs)``.
.. note::
If the corresponding dimensions of two arrays have the same size or one of them has size 1,
then the arrays are broadcastable to a common shape.
Parameters
----------
lhs : scalar or array
First array to be compared.
rhs : scalar or array
Second array to be compared. If ``lhs.shape != rhs.shape``, they must be
broadcastable to a common shape.
Returns
-------
NDArray
Output array of boolean values.
Examples
--------
>>> x = mx.nd.ones((2,3))
>>> y = mx.nd.arange(2).reshape((2,1))
>>> z = mx.nd.arange(2).reshape((1,2))
>>> x.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> y.asnumpy()
array([[ 0.],
[ 1.]], dtype=float32)
>>> z.asnumpy()
array([[ 0., 1.]], dtype=float32)
>>> (x >= 1).asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> (x >= y).asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> mx.nd.greater_equal(x, y).asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> (z >= y).asnumpy()
array([[ 1., 1.],
[ 0., 1.]], dtype=float32)
"""
# pylint: disable= no-member, protected-access
return _ufunc_helper(
lhs,
rhs,
broadcast_greater_equal,
lambda x, y: 1 if x >= y else 0,
_internal._greater_equal_scalar,
_internal._lesser_equal_scalar)
# pylint: enable= no-member, protected-access
def lesser(lhs, rhs):
"""Returns the result of element-wise **lesser than** (<) comparison operation
with broadcasting.
    For each element in the input arrays, return 1 (true) if the lhs element is less than
    the rhs element, otherwise return 0 (false).
Equivalent to ``lhs < rhs`` and ``mx.nd.broadcast_lesser(lhs, rhs)``.
.. note::
If the corresponding dimensions of two arrays have the same size or one of them has size 1,
then the arrays are broadcastable to a common shape.
Parameters
----------
lhs : scalar or array
First array to be compared.
rhs : scalar or array
Second array to be compared. If ``lhs.shape != rhs.shape``, they must be
broadcastable to a common shape.
Returns
-------
NDArray
Output array of boolean values.
Examples
--------
>>> x = mx.nd.ones((2,3))
>>> y = mx.nd.arange(2).reshape((2,1))
>>> z = mx.nd.arange(2).reshape((1,2))
>>> x.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> y.asnumpy()
array([[ 0.],
[ 1.]], dtype=float32)
>>> z.asnumpy()
array([[ 0., 1.]], dtype=float32)
>>> (x < 1).asnumpy()
array([[ 0., 0., 0.],
[ 0., 0., 0.]], dtype=float32)
>>> (x < y).asnumpy()
array([[ 0., 0., 0.],
[ 0., 0., 0.]], dtype=float32)
>>> mx.nd.lesser(x, y).asnumpy()
array([[ 0., 0., 0.],
[ 0., 0., 0.]], dtype=float32)
>>> (z < y).asnumpy()
array([[ 0., 0.],
[ 1., 0.]], dtype=float32)
"""
# pylint: disable= no-member, protected-access
return _ufunc_helper(
lhs,
rhs,
broadcast_lesser,
lambda x, y: 1 if x < y else 0,
_internal._lesser_scalar,
_internal._greater_scalar)
# pylint: enable= no-member, protected-access
def lesser_equal(lhs, rhs):
"""Returns the result of element-wise **lesser than or equal to** (<=) comparison
operation with broadcasting.
    For each element in the input arrays, return 1 (true) if the lhs element is less than
    or equal to the rhs element, otherwise return 0 (false).
Equivalent to ``lhs <= rhs`` and ``mx.nd.broadcast_lesser_equal(lhs, rhs)``.
.. note::
If the corresponding dimensions of two arrays have the same size or one of them has size 1,
then the arrays are broadcastable to a common shape.
Parameters
----------
lhs : scalar or array
First array to be compared.
rhs : scalar or array
Second array to be compared. If ``lhs.shape != rhs.shape``, they must be
broadcastable to a common shape.
Returns
-------
NDArray
Output array of boolean values.
Examples
--------
>>> x = mx.nd.ones((2,3))
>>> y = mx.nd.arange(2).reshape((2,1))
>>> z = mx.nd.arange(2).reshape((1,2))
>>> x.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> y.asnumpy()
array([[ 0.],
[ 1.]], dtype=float32)
>>> z.asnumpy()
array([[ 0., 1.]], dtype=float32)
>>> (x <= 1).asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> (x <= y).asnumpy()
array([[ 0., 0., 0.],
[ 1., 1., 1.]], dtype=float32)
>>> mx.nd.lesser_equal(x, y).asnumpy()
array([[ 0., 0., 0.],
[ 1., 1., 1.]], dtype=float32)
>>> (z <= y).asnumpy()
array([[ 1., 0.],
[ 1., 1.]], dtype=float32)
"""
# pylint: disable= no-member, protected-access
return _ufunc_helper(
lhs,
rhs,
broadcast_lesser_equal,
lambda x, y: 1 if x <= y else 0,
_internal._lesser_equal_scalar,
_internal._greater_equal_scalar)
# pylint: enable= no-member, protected-access
def true_divide(lhs, rhs):
"""This function is similar to :meth:`divide`.
"""
return divide(lhs, rhs)
def negative(arr):
"""Numerical negative, element-wise.
    Equivalent to ``-arr``.
Parameters
----------
arr : NDArray
The input array
Returns
-------
NDArray
``-arr``
Examples
--------
>>> x = mx.nd.ones((2,3))
>>> (-x).asnumpy()
array([[-1., -1., -1.],
[-1., -1., -1.]], dtype=float32)
"""
return multiply(arr, -1.0)
def load(fname):
"""Loads an array from file.
See more details in ``save``.
Parameters
----------
fname : str
The filename.
Returns
-------
list of NDArray or dict of str to NDArray
Loaded data.
"""
if not isinstance(fname, string_types):
raise TypeError('fname required to be a string')
out_size = mx_uint()
out_name_size = mx_uint()
handles = ctypes.POINTER(NDArrayHandle)()
names = ctypes.POINTER(ctypes.c_char_p)()
check_call(_LIB.MXNDArrayLoad(c_str(fname),
ctypes.byref(out_size),
ctypes.byref(handles),
ctypes.byref(out_name_size),
ctypes.byref(names)))
if out_name_size.value == 0:
return [NDArray(NDArrayHandle(handles[i])) for i in range(out_size.value)]
else:
assert out_name_size.value == out_size.value
return dict(
(py_str(names[i]), NDArray(NDArrayHandle(handles[i]))) for i in range(out_size.value))
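# Usage sketch: the return type depends on how the data was saved (see ``save``
# below) -- a plain list when saved as a list, a dict when saved with names.
# >>> mx.nd.load('my_list')    # e.g. [<NDArray 2x3 @cpu(0)>]
# >>> mx.nd.load('my_dict')    # e.g. {'x': <NDArray 2x3 @cpu(0)>}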
def save(fname, data):
"""Saves a list of arrays or a dict of str->array to file.
Examples of filenames:
- ``/path/to/file``
    - ``s3://my-bucket/path/to/file`` (if compiled with AWS S3 support)
    - ``hdfs://path/to/file`` (if compiled with HDFS support)
Parameters
----------
fname : str
The filename.
    data : list of ``NDArray`` or dict of str to ``NDArray``
The data to save.
Examples
--------
>>> x = mx.nd.zeros((2,3))
>>> y = mx.nd.ones((1,4))
>>> mx.nd.save('my_list', [x,y])
>>> mx.nd.save('my_dict', {'x':x, 'y':y})
>>> mx.nd.load('my_list')
[<NDArray 2x3 @cpu(0)>, <NDArray 1x4 @cpu(0)>]
>>> mx.nd.load('my_dict')
{'y': <NDArray 1x4 @cpu(0)>, 'x': <NDArray 2x3 @cpu(0)>}
"""
handles = []
if isinstance(data, dict):
keys = []
for key, val in data.items():
if not isinstance(key, string_types):
raise TypeError('save only accept dict str->NDArray or list of NDArray')
if not isinstance(val, NDArray):
raise TypeError('save only accept dict str->NDArray or list of NDArray')
keys.append(c_str(key))
handles.append(val.handle)
keys = c_array(ctypes.c_char_p, keys)
else:
for val in data:
if not isinstance(val, NDArray):
raise TypeError('save only accept dict str->NDArray or list of NDArray')
handles.append(val.handle)
keys = None
check_call(_LIB.MXNDArraySave(c_str(fname),
mx_uint(len(handles)),
c_array(NDArrayHandle, handles),
keys))
def concatenate(arrays, axis=0, always_copy=True):
"""DEPRECATED, use ``concat`` instead
Parameters
----------
arrays : list of `NDArray`
        Arrays to be concatenated. They must have identical shapes except
        along the concatenation axis. They also must have the same data type.
axis : int
The axis along which to concatenate.
always_copy : bool
        Default `True`. When not `True`, if the list contains only one
        `NDArray`, that element will be returned directly, avoiding a copy.
Returns
-------
NDArray
An `NDArray` that lives on the same context as `arrays[0].context`.
"""
assert isinstance(arrays, list)
assert len(arrays) > 0
assert isinstance(arrays[0], NDArray)
if not always_copy and len(arrays) == 1:
return arrays[0]
shape_axis = arrays[0].shape[axis]
shape_rest1 = arrays[0].shape[0:axis]
shape_rest2 = arrays[0].shape[axis+1:]
dtype = arrays[0].dtype
for arr in arrays[1:]:
shape_axis += arr.shape[axis]
assert shape_rest1 == arr.shape[0:axis]
assert shape_rest2 == arr.shape[axis+1:]
assert dtype == arr.dtype
ret_shape = shape_rest1 + (shape_axis,) + shape_rest2
ret = empty(ret_shape, ctx=arrays[0].context, dtype=dtype)
idx = 0
begin = [0 for _ in ret_shape]
end = list(ret_shape)
for arr in arrays:
if axis == 0:
ret[idx:idx+arr.shape[0]] = arr
else:
begin[axis] = idx
end[axis] = idx+arr.shape[axis]
# pylint: disable=no-member,protected-access
_internal._crop_assign(ret, arr, out=ret,
begin=tuple(begin),
end=tuple(end))
# pylint: enable=no-member,protected-access
idx += arr.shape[axis]
return ret
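# Usage sketch for this deprecated helper (``concat`` is the replacement):
# >>> a = mx.nd.ones((2, 3))
# >>> b = mx.nd.zeros((2, 3))
# >>> mx.nd.concatenate([a, b]).shape          # along axis 0
# (4L, 3L)
# >>> mx.nd.concatenate([a, b], axis=1).shape  # along axis 1
# (2L, 6L)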
def imdecode(str_img, clip_rect=(0, 0, 0, 0), out=None, index=0, channels=3, mean=None):
"""DEPRECATED, use mx.img instead
Parameters
----------
str_img : str
Binary image data
clip_rect : iterable of 4 int
Clip decoded image to rectangle (x0, y0, x1, y1).
out : NDArray
Output buffer. Can be 3 dimensional (c, h, w) or 4 dimensional (n, c, h, w).
index : int
Output decoded image to i-th slice of 4 dimensional buffer.
channels : int
Number of channels to output. Decode to grey scale when channels = 1.
mean : NDArray
        Subtract mean from the decoded image before outputting.
"""
# pylint: disable= no-member, protected-access, too-many-arguments
if mean is None:
mean = NDArray(_new_empty_handle())
if out is None:
return _internal._imdecode(mean, index,
clip_rect[0],
clip_rect[1],
clip_rect[2],
clip_rect[3],
channels,
len(str_img),
str_img=str_img)
else:
return _internal._imdecode(mean, index,
clip_rect[0],
clip_rect[1],
clip_rect[2],
clip_rect[3],
channels,
len(str_img),
str_img=str_img,
out=out)
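# Usage sketch (deprecated; mx.img is the replacement). 'my_image.jpg' is a
# hypothetical file name:
# >>> with open('my_image.jpg', 'rb') as f:
# ...     raw = f.read()
# >>> img = mx.nd.imdecode(raw, channels=3)  # decodes into a new NDArray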
# pylint: disable=too-many-locals, invalid-name
def _make_ndarray_function(handle, name):
"""Create a NDArray function from the FunctionHandle."""
real_name = ctypes.c_char_p()
desc = ctypes.c_char_p()
num_args = mx_uint()
arg_names = ctypes.POINTER(ctypes.c_char_p)()
arg_types = ctypes.POINTER(ctypes.c_char_p)()
arg_descs = ctypes.POINTER(ctypes.c_char_p)()
key_var_num_args = ctypes.c_char_p()
ret_type = ctypes.c_char_p()
check_call(_LIB.MXSymbolGetAtomicSymbolInfo(
handle, ctypes.byref(real_name), ctypes.byref(desc),
ctypes.byref(num_args),
ctypes.byref(arg_names),
ctypes.byref(arg_types),
ctypes.byref(arg_descs),
ctypes.byref(key_var_num_args),
ctypes.byref(ret_type)))
narg = int(num_args.value)
arg_names = [py_str(arg_names[i]) for i in range(narg)]
arg_types = [py_str(arg_types[i]) for i in range(narg)]
func_name = name
key_var_num_args = py_str(key_var_num_args.value)
ret_type = py_str(ret_type.value) if ret_type.value is not None else ''
doc_str = _build_doc(func_name,
py_str(desc.value),
arg_names,
arg_types,
[py_str(arg_descs[i]) for i in range(narg)],
key_var_num_args,
ret_type)
dtype_name = None
arr_name = None
ndsignature = []
signature = []
ndarg_names = []
kwarg_names = []
for i in range(narg):
name, atype = arg_names[i], arg_types[i]
if name == 'dtype':
dtype_name = name
signature.append('%s=_Null'%name)
elif atype.startswith('NDArray') or atype.startswith('Symbol'):
assert not arr_name, \
"Op can only have one argument with variable " \
"size and it must be the last argument."
if atype.endswith('[]'):
ndsignature.append('*%s'%name)
arr_name = name
else:
ndsignature.append('%s=None'%name)
ndarg_names.append(name)
else:
signature.append('%s=_Null'%name)
kwarg_names.append(name)
#signature.append('is_train=False')
signature.append('out=None')
signature.append('name=None')
signature.append('**kwargs')
signature = ndsignature + signature
code = []
if arr_name:
code.append("""
def %s(*%s, **kwargs):"""%(func_name, arr_name))
code.append("""
ndargs = []
for i in {}:
assert isinstance(i, NDArrayBase), \\
"Positional arguments must have NDArray type, " \\
"but got %s"%str(i)
ndargs.append(i)""".format(arr_name))
if dtype_name is not None:
code.append("""
if '%s' in kwargs:
kwargs['%s'] = np.dtype(kwargs['%s']).name"""%(
dtype_name, dtype_name, dtype_name))
code.append("""
_ = kwargs.pop('name', None)
out = kwargs.pop('out', None)
keys = list(kwargs.keys())
vals = list(kwargs.values())""")
else:
code.append("""
def %s(%s):
ndargs = []
keys = list(kwargs.keys())
vals = list(kwargs.values())"""%(func_name, ', '.join(signature)))
# NDArray args
for name in ndarg_names: # pylint: disable=redefined-argument-from-local
code.append("""
if {name} is not None:
assert isinstance({name}, NDArrayBase), \\
"Argument {name} must have NDArray type, but got %s"%str({name})
ndargs.append({name})""".format(name=name))
# kwargs
for name in kwarg_names: # pylint: disable=redefined-argument-from-local
code.append("""
if %s is not _Null:
keys.append('%s')
vals.append(%s)"""%(name, name, name))
# dtype
if dtype_name is not None:
code.append("""
if %s is not _Null:
keys.append('%s')
vals.append(np.dtype(%s).name)"""%(dtype_name, dtype_name, dtype_name))
code.append("""
return _imperative_invoke(%d, ndargs, keys, vals, out)"""%(
handle.value))
local = {}
exec(''.join(code), None, local) # pylint: disable=exec-used
ndarray_function = local[func_name]
ndarray_function.__name__ = func_name
ndarray_function.__doc__ = doc_str
ndarray_function.__module__ = 'mxnet.ndarray'
return ndarray_function
# pylint: enable=too-many-locals, invalid-name
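# Sketch of the source generated above for a hypothetical op
# foo(data=None, alpha=_Null): the exec'd wrapper is roughly
#
#   def foo(data=None, alpha=_Null, out=None, name=None, **kwargs):
#       ndargs = []
#       keys = list(kwargs.keys())
#       vals = list(kwargs.values())
#       if data is not None:
#           assert isinstance(data, NDArrayBase), ...
#           ndargs.append(data)
#       if alpha is not _Null:
#           keys.append('alpha')
#           vals.append(alpha)
#       return _imperative_invoke(<handle>, ndargs, keys, vals, out)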
def _init_ndarray_module(ndarray_class, root_namespace):
"""List and add all the ndarray functions to current module."""
_set_ndarray_class(ndarray_class)
plist = ctypes.POINTER(ctypes.c_char_p)()
size = ctypes.c_uint()
check_call(_LIB.MXListAllOpNames(ctypes.byref(size),
ctypes.byref(plist)))
op_names = []
for i in range(size.value):
op_names.append(py_str(plist[i]))
module_obj = _sys.modules["%s.ndarray" % root_namespace]
module_internal = _sys.modules["%s._ndarray_internal" % root_namespace]
module_contrib = _sys.modules["%s.contrib.ndarray" % root_namespace]
for name in op_names:
hdl = OpHandle()
check_call(_LIB.NNGetOpHandle(c_str(name), ctypes.byref(hdl)))
function = _make_ndarray_function(hdl, name)
if function.__name__.startswith('_contrib_'):
function.__name__ = function.__name__[9:]
function.__module__ = 'mxnet.contrib.ndarray'
setattr(module_contrib, function.__name__, function)
elif function.__name__.startswith('_'):
setattr(module_internal, function.__name__, function)
else:
setattr(module_obj, function.__name__, function)
_init_ndarray_module(NDArray, "mxnet")
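# Routing sketch: an op whose name starts with '_contrib_' (e.g. a hypothetical
# '_contrib_foo') is exposed as mxnet.contrib.ndarray.foo, other '_'-prefixed ops
# (e.g. '_plus_scalar') stay in mxnet._ndarray_internal, and everything else is
# attached to mxnet.ndarray directly.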
# from .base import add_fileline_to_docstring
# add_fileline_to_docstring(__name__)
| apache-2.0 |
cloudera/hue | desktop/core/ext-py/oauth2client-4.1.3/oauth2client/contrib/django_util/__init__.py | 39 | 18152 | # Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for the Django web framework.
Provides Django views and helpers that make using the OAuth2 web server
flow easier. It includes an ``oauth_required`` decorator to automatically
ensure that user credentials are available, and an ``oauth_enabled`` decorator
to check if the user has authorized, and helper shortcuts to create the
authorization URL otherwise.
There are two basic use cases supported. The first is using Google OAuth as the
primary form of authentication, which is the simpler approach recommended
for applications without their own user system.
The second use case is adding Google OAuth credentials to an
existing Django model containing a Django user field. Most of the
configuration is the same, except for `GOOGLE_OAUTH_MODEL_STORAGE` in
settings.py. See "Adding Credentials To An Existing Django User System" for
usage differences.
Only Django versions 1.8+ are supported.
Configuration
===============
To configure, you'll need a set of OAuth2 web application credentials from
`Google Developer's Console <https://console.developers.google.com/project/_/apiui/credential>`.
Add the helper to your INSTALLED_APPS:
.. code-block:: python
:caption: settings.py
:name: installed_apps
INSTALLED_APPS = (
# other apps
"django.contrib.sessions.middleware"
"oauth2client.contrib.django_util"
)
This helper also requires the Django Session Middleware, so
``django.contrib.sessions.middleware`` should be in INSTALLED_APPS as well.
MIDDLEWARE or MIDDLEWARE_CLASSES (in Django versions <1.10) should also
contain the string 'django.contrib.sessions.middleware.SessionMiddleware'.
Add the client secrets created earlier to the settings. You can either
specify the path to the credentials file in JSON format
.. code-block:: python
:caption: settings.py
:name: secrets_file
GOOGLE_OAUTH2_CLIENT_SECRETS_JSON=/path/to/client-secret.json
Or, directly configure the client Id and client secret.
.. code-block:: python
:caption: settings.py
:name: secrets_config
GOOGLE_OAUTH2_CLIENT_ID=client-id-field
GOOGLE_OAUTH2_CLIENT_SECRET=client-secret-field
By default, the default scopes for the required decorator only contains the
``email`` scopes. You can change that default in the settings.
.. code-block:: python
:caption: settings.py
:name: scopes
GOOGLE_OAUTH2_SCOPES = ('email', 'https://www.googleapis.com/auth/calendar',)
By default, the decorators will add an `oauth` object to the Django request
object, and include all of its state and helpers inside that object. If the
`oauth` name conflicts with another usage, it can be changed
.. code-block:: python
:caption: settings.py
:name: request_prefix
# changes request.oauth to request.google_oauth
GOOGLE_OAUTH2_REQUEST_ATTRIBUTE = 'google_oauth'
Add the oauth2 routes to your application's urls.py urlpatterns.
.. code-block:: python
:caption: urls.py
:name: urls
from oauth2client.contrib.django_util.site import urls as oauth2_urls
urlpatterns += [url(r'^oauth2/', include(oauth2_urls))]
To require OAuth2 credentials for a view, use the `oauth2_required` decorator.
This creates a credentials object with an id_token, and allows you to create
an `http` object to build service clients with. These are all attached to the
request.oauth
.. code-block:: python
:caption: views.py
:name: views_required
from oauth2client.contrib.django_util.decorators import oauth_required
@oauth_required
def requires_default_scopes(request):
email = request.oauth.credentials.id_token['email']
service = build(serviceName='calendar', version='v3',
http=request.oauth.http,
developerKey=API_KEY)
events = service.events().list(calendarId='primary').execute()['items']
return HttpResponse("email: {0} , calendar: {1}".format(
email,str(events)))
return HttpResponse(
"email: {0} , calendar: {1}".format(email, str(events)))
To make OAuth2 optional and provide an authorization link in your own views.
.. code-block:: python
:caption: views.py
:name: views_enabled2
from oauth2client.contrib.django_util.decorators import oauth_enabled
@oauth_enabled
def optional_oauth2(request):
if request.oauth.has_credentials():
# this could be passed into a view
# request.oauth.http is also initialized
return HttpResponse("User email: {0}".format(
request.oauth.credentials.id_token['email']))
else:
return HttpResponse(
'Here is an OAuth Authorize link: <a href="{0}">Authorize'
'</a>'.format(request.oauth.get_authorize_redirect()))
If a view needs a scope not included in the default scopes specified in
the settings, you can use [incremental auth](https://developers.google.com/identity/sign-in/web/incremental-auth)
and specify additional scopes in the decorator arguments.
.. code-block:: python
:caption: views.py
:name: views_required_additional_scopes
@oauth_enabled(scopes=['https://www.googleapis.com/auth/drive'])
def drive_required(request):
if request.oauth.has_credentials():
service = build(serviceName='drive', version='v2',
http=request.oauth.http,
developerKey=API_KEY)
events = service.files().list().execute()['items']
return HttpResponse(str(events))
else:
return HttpResponse(
'Here is an OAuth Authorize link: <a href="{0}">Authorize'
'</a>'.format(request.oauth.get_authorize_redirect()))
To provide a callback on authorization being completed, use the
oauth2_authorized signal:
.. code-block:: python
:caption: views.py
:name: signals
from oauth2client.contrib.django_util.signals import oauth2_authorized
def test_callback(sender, request, credentials, **kwargs):
print("Authorization Signal Received {0}".format(
credentials.id_token['email']))
oauth2_authorized.connect(test_callback)
Adding Credentials To An Existing Django User System
=====================================================
As an alternative to storing the credentials in the session, the helper
can be configured to store the fields on a Django model. This might be useful
if you need to use the credentials outside the context of a user request. It
also prevents the need for a logged in user to repeat the OAuth flow when
starting a new session.
To use, change ``settings.py``
.. code-block:: python
:caption: settings.py
:name: storage_model_config
GOOGLE_OAUTH2_STORAGE_MODEL = {
'model': 'path.to.model.MyModel',
'user_property': 'user_id',
'credentials_property': 'credential'
}
Where ``path.to.model`` class is the fully qualified name of a
``django.db.model`` class containing a ``django.contrib.auth.models.User``
field with the name specified by `user_property` and a
:class:`oauth2client.contrib.django_util.models.CredentialsField` with the name
specified by `credentials_property`. For the sample configuration given,
our model would look like
.. code-block:: python
:caption: models.py
:name: storage_model_model
from django.contrib.auth.models import User
from oauth2client.contrib.django_util.models import CredentialsField
class MyModel(models.Model):
# ... other fields here ...
user = models.OneToOneField(User)
credential = CredentialsField()
"""
import importlib
import django.conf
from django.core import exceptions
from django.core import urlresolvers
from six.moves.urllib import parse
from oauth2client import clientsecrets
from oauth2client import transport
from oauth2client.contrib import dictionary_storage
from oauth2client.contrib.django_util import storage
GOOGLE_OAUTH2_DEFAULT_SCOPES = ('email',)
GOOGLE_OAUTH2_REQUEST_ATTRIBUTE = 'oauth'
def _load_client_secrets(filename):
"""Loads client secrets from the given filename.
Args:
filename: The name of the file containing the JSON secret key.
Returns:
A 2-tuple, the first item containing the client id, and the second
item containing a client secret.
"""
client_type, client_info = clientsecrets.loadfile(filename)
if client_type != clientsecrets.TYPE_WEB:
raise ValueError(
'The flow specified in {} is not supported, only the WEB flow '
'type is supported.'.format(client_type))
return client_info['client_id'], client_info['client_secret']
def _get_oauth2_client_id_and_secret(settings_instance):
"""Initializes client id and client secret based on the settings.
Args:
settings_instance: An instance of ``django.conf.settings``.
Returns:
A 2-tuple, the first item is the client id and the second
item is the client secret.
"""
secret_json = getattr(settings_instance,
'GOOGLE_OAUTH2_CLIENT_SECRETS_JSON', None)
if secret_json is not None:
return _load_client_secrets(secret_json)
else:
client_id = getattr(settings_instance, "GOOGLE_OAUTH2_CLIENT_ID",
None)
client_secret = getattr(settings_instance,
"GOOGLE_OAUTH2_CLIENT_SECRET", None)
if client_id is not None and client_secret is not None:
return client_id, client_secret
else:
raise exceptions.ImproperlyConfigured(
"Must specify either GOOGLE_OAUTH2_CLIENT_SECRETS_JSON, or "
"both GOOGLE_OAUTH2_CLIENT_ID and "
"GOOGLE_OAUTH2_CLIENT_SECRET in settings.py")
def _get_storage_model():
"""This configures whether the credentials will be stored in the session
or the Django ORM based on the settings. By default, the credentials
will be stored in the session, unless `GOOGLE_OAUTH2_STORAGE_MODEL`
is found in the settings. Usually, the ORM storage is used to integrate
credentials into an existing Django user system.
Returns:
A tuple containing three strings, or None. If
``GOOGLE_OAUTH2_STORAGE_MODEL`` is configured, the tuple
will contain the fully qualifed path of the `django.db.model`,
the name of the ``django.contrib.auth.models.User`` field on the
model, and the name of the
:class:`oauth2client.contrib.django_util.models.CredentialsField`
        field on the model. If Django ORM storage is not configured,
        this function returns a tuple of three ``None`` values.
"""
storage_model_settings = getattr(django.conf.settings,
'GOOGLE_OAUTH2_STORAGE_MODEL', None)
if storage_model_settings is not None:
return (storage_model_settings['model'],
storage_model_settings['user_property'],
storage_model_settings['credentials_property'])
else:
return None, None, None
class OAuth2Settings(object):
"""Initializes Django OAuth2 Helper Settings
This class loads the OAuth2 Settings from the Django settings, and then
provides those settings as attributes to the rest of the views and
decorators in the module.
Attributes:
scopes: A list of OAuth2 scopes that the decorators and views will use
as defaults.
request_prefix: The name of the attribute that the decorators use to
attach the UserOAuth2 object to the Django request object.
client_id: The OAuth2 Client ID.
client_secret: The OAuth2 Client Secret.
"""
def __init__(self, settings_instance):
self.scopes = getattr(settings_instance, 'GOOGLE_OAUTH2_SCOPES',
GOOGLE_OAUTH2_DEFAULT_SCOPES)
self.request_prefix = getattr(settings_instance,
'GOOGLE_OAUTH2_REQUEST_ATTRIBUTE',
GOOGLE_OAUTH2_REQUEST_ATTRIBUTE)
info = _get_oauth2_client_id_and_secret(settings_instance)
self.client_id, self.client_secret = info
# Django 1.10 deprecated MIDDLEWARE_CLASSES in favor of MIDDLEWARE
middleware_settings = getattr(settings_instance, 'MIDDLEWARE', None)
if middleware_settings is None:
middleware_settings = getattr(
settings_instance, 'MIDDLEWARE_CLASSES', None)
if middleware_settings is None:
raise exceptions.ImproperlyConfigured(
'Django settings has neither MIDDLEWARE nor MIDDLEWARE_CLASSES'
'configured')
if ('django.contrib.sessions.middleware.SessionMiddleware' not in
middleware_settings):
raise exceptions.ImproperlyConfigured(
'The Google OAuth2 Helper requires session middleware to '
'be installed. Edit your MIDDLEWARE_CLASSES or MIDDLEWARE '
'setting to include \'django.contrib.sessions.middleware.'
'SessionMiddleware\'.')
(self.storage_model, self.storage_model_user_property,
self.storage_model_credentials_property) = _get_storage_model()
oauth2_settings = OAuth2Settings(django.conf.settings)
_CREDENTIALS_KEY = 'google_oauth2_credentials'
def get_storage(request):
""" Gets a Credentials storage object provided by the Django OAuth2 Helper
object.
Args:
request: Reference to the current request object.
Returns:
        An :class:`oauth2client.client.Storage` object.
"""
storage_model = oauth2_settings.storage_model
user_property = oauth2_settings.storage_model_user_property
credentials_property = oauth2_settings.storage_model_credentials_property
if storage_model:
module_name, class_name = storage_model.rsplit('.', 1)
module = importlib.import_module(module_name)
storage_model_class = getattr(module, class_name)
return storage.DjangoORMStorage(storage_model_class,
user_property,
request.user,
credentials_property)
else:
# use session
return dictionary_storage.DictionaryStorage(
request.session, key=_CREDENTIALS_KEY)
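# Usage sketch: the helpers below fetch credentials through this abstraction
# (see _credentials_from_request), e.g.
#   credentials = get_storage(request).get()  # None until the user authorizes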
def _redirect_with_params(url_name, *args, **kwargs):
"""Helper method to create a redirect response with URL params.
This builds a redirect string that converts kwargs into a
query string.
Args:
url_name: The name of the url to redirect to.
        kwargs: the query string params and their values.
Returns:
A properly formatted redirect string.
"""
url = urlresolvers.reverse(url_name, args=args)
params = parse.urlencode(kwargs, True)
return "{0}?{1}".format(url, params)
def _credentials_from_request(request):
"""Gets the authorized credentials for this flow, if they exist."""
# ORM storage requires a logged in user
if (oauth2_settings.storage_model is None or
request.user.is_authenticated()):
return get_storage(request).get()
else:
return None
class UserOAuth2(object):
"""Class to create oauth2 objects on Django request objects containing
credentials and helper methods.
"""
def __init__(self, request, scopes=None, return_url=None):
"""Initialize the Oauth2 Object.
Args:
request: Django request object.
scopes: Scopes desired for this OAuth2 flow.
return_url: The url to return to after the OAuth flow is complete,
defaults to the request's current URL path.
"""
self.request = request
self.return_url = return_url or request.get_full_path()
if scopes:
self._scopes = set(oauth2_settings.scopes) | set(scopes)
else:
self._scopes = set(oauth2_settings.scopes)
def get_authorize_redirect(self):
"""Creates a URl to start the OAuth2 authorization flow."""
get_params = {
'return_url': self.return_url,
'scopes': self._get_scopes()
}
return _redirect_with_params('google_oauth:authorize', **get_params)
def has_credentials(self):
"""Returns True if there are valid credentials for the current user
and required scopes."""
credentials = _credentials_from_request(self.request)
return (credentials and not credentials.invalid and
credentials.has_scopes(self._get_scopes()))
def _get_scopes(self):
"""Returns the scopes associated with this object, kept up to
date for incremental auth."""
if _credentials_from_request(self.request):
return (self._scopes |
_credentials_from_request(self.request).scopes)
else:
return self._scopes
@property
def scopes(self):
"""Returns the scopes associated with this OAuth2 object."""
# make sure previously requested custom scopes are maintained
# in future authorizations
return self._get_scopes()
@property
def credentials(self):
"""Gets the authorized credentials for this flow, if they exist."""
return _credentials_from_request(self.request)
@property
def http(self):
"""Helper: create HTTP client authorized with OAuth2 credentials."""
if self.has_credentials():
return self.credentials.authorize(transport.get_http_object())
return None
| apache-2.0 |
sbalde/edxplatform | cms/djangoapps/contentstore/management/commands/tests/test_reindex_courses.py | 32 | 6714 | """ Tests for course reindex command """
import ddt
from django.core.management import call_command, CommandError
import mock
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from common.test.utils import nostderr
from xmodule.modulestore.tests.factories import CourseFactory, LibraryFactory
from contentstore.management.commands.reindex_course import Command as ReindexCommand
from contentstore.courseware_index import SearchIndexingError
@ddt.ddt
class TestReindexCourse(ModuleStoreTestCase):
""" Tests for course reindex command """
def setUp(self):
""" Setup method - create courses """
super(TestReindexCourse, self).setUp()
self.store = modulestore()
self.first_lib = LibraryFactory.create(
org="test", library="lib1", display_name="run1", default_store=ModuleStoreEnum.Type.split
)
self.second_lib = LibraryFactory.create(
org="test", library="lib2", display_name="run2", default_store=ModuleStoreEnum.Type.split
)
self.first_course = CourseFactory.create(
org="test", course="course1", display_name="run1"
)
self.second_course = CourseFactory.create(
org="test", course="course2", display_name="run1"
)
REINDEX_PATH_LOCATION = 'contentstore.management.commands.reindex_course.CoursewareSearchIndexer.do_course_reindex'
MODULESTORE_PATCH_LOCATION = 'contentstore.management.commands.reindex_course.modulestore'
YESNO_PATCH_LOCATION = 'contentstore.management.commands.reindex_course.query_yes_no'
def _get_lib_key(self, library):
""" Get's library key as it is passed to indexer """
return library.location.library_key
def _build_calls(self, *courses):
""" Builds a list of mock.call instances representing calls to reindexing method """
return [mock.call(self.store, course.id) for course in courses]
def test_given_no_arguments_raises_command_error(self):
""" Test that raises CommandError for incorrect arguments """
with self.assertRaises(SystemExit), nostderr():
with self.assertRaisesRegexp(CommandError, ".* requires one or more arguments .*"):
call_command('reindex_course')
@ddt.data('qwerty', 'invalid_key', 'xblock-v1:qwe+rty')
def test_given_invalid_course_key_raises_not_found(self, invalid_key):
""" Test that raises InvalidKeyError for invalid keys """
errstring = "Invalid course_key: '%s'." % invalid_key
with self.assertRaises(SystemExit) as ex:
with self.assertRaisesRegexp(CommandError, errstring):
call_command('reindex_course', invalid_key)
self.assertEqual(ex.exception.code, 1)
def test_given_library_key_raises_command_error(self):
""" Test that raises CommandError if library key is passed """
with self.assertRaises(SystemExit), nostderr():
with self.assertRaisesRegexp(SearchIndexingError, ".* is not a course key"):
call_command('reindex_course', unicode(self._get_lib_key(self.first_lib)))
with self.assertRaises(SystemExit), nostderr():
with self.assertRaisesRegexp(SearchIndexingError, ".* is not a course key"):
call_command('reindex_course', unicode(self._get_lib_key(self.second_lib)))
with self.assertRaises(SystemExit), nostderr():
with self.assertRaisesRegexp(SearchIndexingError, ".* is not a course key"):
call_command(
'reindex_course',
unicode(self.second_course.id),
unicode(self._get_lib_key(self.first_lib))
)
def test_given_id_list_indexes_courses(self):
""" Test that reindexes courses when given single course key or a list of course keys """
with mock.patch(self.REINDEX_PATH_LOCATION) as patched_index, \
mock.patch(self.MODULESTORE_PATCH_LOCATION, mock.Mock(return_value=self.store)):
call_command('reindex_course', unicode(self.first_course.id))
self.assertEqual(patched_index.mock_calls, self._build_calls(self.first_course))
patched_index.reset_mock()
call_command('reindex_course', unicode(self.second_course.id))
self.assertEqual(patched_index.mock_calls, self._build_calls(self.second_course))
patched_index.reset_mock()
call_command(
'reindex_course',
unicode(self.first_course.id),
unicode(self.second_course.id)
)
expected_calls = self._build_calls(self.first_course, self.second_course)
self.assertEqual(patched_index.mock_calls, expected_calls)
def test_given_all_key_prompts_and_reindexes_all_courses(self):
""" Test that reindexes all courses when --all key is given and confirmed """
with mock.patch(self.YESNO_PATCH_LOCATION) as patched_yes_no:
patched_yes_no.return_value = True
with mock.patch(self.REINDEX_PATH_LOCATION) as patched_index, \
mock.patch(self.MODULESTORE_PATCH_LOCATION, mock.Mock(return_value=self.store)):
call_command('reindex_course', all=True)
patched_yes_no.assert_called_once_with(ReindexCommand.CONFIRMATION_PROMPT, default='no')
expected_calls = self._build_calls(self.first_course, self.second_course)
self.assertItemsEqual(patched_index.mock_calls, expected_calls)
def test_given_all_key_prompts_and_reindexes_all_courses_cancelled(self):
""" Test that does not reindex anything when --all key is given and cancelled """
with mock.patch(self.YESNO_PATCH_LOCATION) as patched_yes_no:
patched_yes_no.return_value = False
with mock.patch(self.REINDEX_PATH_LOCATION) as patched_index, \
mock.patch(self.MODULESTORE_PATCH_LOCATION, mock.Mock(return_value=self.store)):
call_command('reindex_course', all=True)
patched_yes_no.assert_called_once_with(ReindexCommand.CONFIRMATION_PROMPT, default='no')
patched_index.assert_not_called()
def test_fail_fast_if_reindex_fails(self):
""" Test that fails on first reindexing exception """
with mock.patch(self.REINDEX_PATH_LOCATION) as patched_index:
patched_index.side_effect = SearchIndexingError("message", [])
with self.assertRaises(SearchIndexingError):
call_command('reindex_course', unicode(self.second_course.id))
| agpl-3.0 |
tumbl3w33d/ansible | test/units/modules/network/netvisor/test_pn_log_audit_exception.py | 21 | 2610 | # Copyright: (c) 2018, Pluribus Networks
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from units.compat.mock import patch
from ansible.modules.network.netvisor import pn_log_audit_exception
from units.modules.utils import set_module_args
from .nvos_module import TestNvosModule, load_fixture
class TestLogAuditExceptionModule(TestNvosModule):
module = pn_log_audit_exception
def setUp(self):
self.mock_run_nvos_commands = patch('ansible.modules.network.netvisor.pn_log_audit_exception.run_cli')
self.run_nvos_commands = self.mock_run_nvos_commands.start()
self.mock_run_check_cli = patch('ansible.modules.network.netvisor.pn_log_audit_exception.check_cli')
self.run_check_cli = self.mock_run_check_cli.start()
def tearDown(self):
self.mock_run_nvos_commands.stop()
self.mock_run_check_cli.stop()
def run_cli_patch(self, module, cli, state_map):
if state_map['present'] == 'log-audit-exception-create':
results = dict(
changed=True,
cli_cmd=cli
)
elif state_map['absent'] == 'log-audit-exception-delete':
results = dict(
changed=True,
cli_cmd=cli
)
module.exit_json(**results)
def load_fixtures(self, commands=None, state=None, transport='cli'):
self.run_nvos_commands.side_effect = self.run_cli_patch
if state == 'present':
self.run_check_cli.return_value = False
if state == 'absent':
self.run_check_cli.return_value = True
def test_log_audit_exception_create(self):
set_module_args({'pn_cliswitch': 'sw01', 'pn_audit_type': 'cli',
'pn_pattern': 'test', 'pn_scope': 'local', 'pn_access': 'any', 'state': 'present'})
result = self.execute_module(changed=True, state='present')
expected_cmd = ' switch sw01 log-audit-exception-create cli pattern test any scope local '
self.assertEqual(result['cli_cmd'], expected_cmd)
def test_log_audit_exception_delete(self):
set_module_args({'pn_cliswitch': 'sw01', 'pn_audit_type': 'cli',
'pn_pattern': 'test', 'pn_access': 'any', 'state': 'absent'})
result = self.execute_module(changed=True, state='absent')
expected_cmd = ' switch sw01 log-audit-exception-delete cli pattern test any'
self.assertEqual(result['cli_cmd'], expected_cmd)
| gpl-3.0 |
kajgan/e2 | lib/python/Tools/ASCIItranslit.py | 84 | 4273 | # -*- coding:utf-8 -*-
ASCIItranslit = { \
0x0022: "''", \
0x002A: "_", \
0x002F: "_", \
0x003A: "_", \
0x003C: "_", \
0x003D: "_", \
0x003E: "_", \
0x003F: "_", \
0x005C: "_", \
0x007C: "_", \
0x007F: "", \
0x00A0: "_", \
0x00A1: "!", \
0x00A2: "c", \
0x00A3: "lb", \
0x00A4: "", \
0x00A5: "yen", \
0x00A6: "I", \
0x00A7: "SS", \
0x00A8: "'", \
0x00A9: "(c)", \
0x00AA: "a", \
0x00AB: "<<", \
0x00AC: "not", \
0x00AD: "-", \
0x00AE: "(R)", \
0x00AF: "", \
0x00B0: "^0", \
0x00B1: "+-", \
0x00B2: "^2", \
0x00B3: "^3", \
0x00B4: "'", \
0x00B5: "u", \
0x00B6: "P", \
0x00B7: ".", \
0x00B8: ",", \
0x00B9: "^1", \
0x00BA: "o", \
0x00BB: ">>", \
0x00BC: "1_4 ", \
0x00BD: "1_2 ", \
0x00BE: "3_4 ", \
0x00BF: "_", \
0x00C0: "`A", \
0x00C1: "'A", \
0x00C2: "^A", \
0x00C3: "~A", \
0x00C4: "Ae", \
0x00C5: "A", \
0x00C6: "AE", \
0x00C7: "C", \
0x00C8: "`E", \
0x00C9: "'E", \
0x00CA: "^E", \
0x00CB: "E", \
0x00CC: "`I", \
0x00CD: "'I", \
0x00CE: "^I", \
0x00CF: "I", \
0x00D0: "D", \
0x00D1: "~N", \
0x00D2: "`O", \
0x00D3: "'O", \
0x00D4: "^O", \
0x00D5: "~O", \
0x00D6: "Oe", \
0x00D7: "x", \
0x00D8: "O", \
0x00D9: "`U", \
0x00DA: "'U", \
0x00DB: "^U", \
0x00DC: "Ue", \
0x00DD: "'Y", \
0x00DE: "Th", \
0x00DF: "ss", \
0x00E0: "`a", \
0x00E1: "'a", \
0x00E2: "^a", \
0x00E3: "~a", \
0x00E4: "AE", \
0x00E5: "a", \
0x00E6: "ae", \
0x00E7: "c", \
0x00E8: "`e", \
0x00E9: "'e", \
0x00EA: "^e", \
0x00EB: "e", \
0x00EC: "`i", \
0x00ED: "'i", \
0x00EE: "^i", \
0x00EF: "i", \
0x00F0: "d", \
0x00F1: "~n", \
0x00F2: "`o", \
0x00F3: "'o", \
0x00F4: "^o", \
0x00F5: "~o", \
0x00F6: "oe", \
0x00F7: "_", \
0x00F8: "o", \
0x00F9: "`u", \
0x00FA: "'u", \
0x00FB: "^u", \
0x00FC: "ue", \
0x00FD: "'y", \
0x00FE: "th", \
0x00FF: "Y", \
0x0100: "A", \
0x0101: "a", \
0x0102: "A", \
0x0103: "a", \
0x0104: "A", \
0x0105: "a", \
0x0106: "'C", \
0x0107: "'c", \
0x0108: "^C", \
0x0109: "^c", \
0x010A: "C", \
0x010B: "c", \
0x010C: "C", \
0x010D: "c", \
0x010E: "D", \
0x010F: "d", \
0x0110: "D", \
0x0111: "d", \
0x0112: "E", \
0x0113: "e", \
0x0114: "E", \
0x0115: "e", \
0x0116: "E", \
0x0117: "e", \
0x0118: "E", \
0x0119: "e", \
0x011A: "E", \
0x011B: "e", \
0x011C: "^G", \
0x011D: "^g", \
0x011E: "G", \
0x011F: "g", \
0x0120: "G", \
0x0121: "g", \
0x0122: "G", \
0x0123: "g", \
0x0124: "^H", \
0x0125: "^h", \
0x0126: "H", \
0x0127: "h", \
0x0128: "~I", \
0x0129: "~i", \
0x012A: "I", \
0x012B: "i", \
0x012C: "I", \
0x012D: "i", \
0x012E: "I", \
0x012F: "i", \
0x0130: "I", \
0x0131: "i", \
0x0132: "IJ", \
0x0133: "ij", \
0x0134: "^J", \
0x0135: "^j", \
0x0136: "K", \
0x0137: "k", \
0x0138: "", \
0x0139: "L", \
0x013A: "l", \
0x013B: "L", \
0x013C: "l", \
0x013D: "L", \
0x013E: "l", \
0x013F: "L", \
0x0140: "l", \
0x0141: "L", \
0x0142: "l", \
0x0143: "'N", \
0x0144: "'n", \
0x0145: "N", \
0x0146: "n", \
0x0147: "N", \
0x0148: "n", \
0x0149: "n", \
0x014A: "_", \
0x014B: "_", \
0x014C: "O", \
0x014D: "o", \
0x014E: "O", \
0x014F: "o", \
0x0150: "''o", \
0x0152: "OE", \
0x0153: "oe", \
0x0154: "'R", \
0x0155: "'r", \
0x0156: "R", \
0x0157: "r", \
0x0158: "R", \
0x0159: "r", \
0x015A: "'s", \
0x015B: "'s", \
0x015C: "^S", \
0x015D: "^s", \
0x015E: "S", \
0x015F: "s", \
0x0160: "S", \
0x0161: "s", \
0x0162: "T", \
0x0163: "t", \
0x0164: "T", \
0x0165: "t", \
0x0166: "T", \
0x0167: "t", \
0x0168: "~U", \
0x0169: "~u", \
0x016A: "U", \
0x016B: "u", \
0x016C: "U", \
0x016D: "u", \
0x016E: "U", \
0x016F: "u", \
0x0170: "''u", \
0x0172: "U", \
0x0173: "u", \
0x0174: "^W", \
0x0175: "^w", \
0x0176: "^Y", \
0x0177: "^y", \
0x0178: "Y", \
0x0179: "'Z", \
0x017A: "'z", \
0x017B: "Z", \
0x017C: "z", \
0x017D: "Z", \
0x017E: "z", \
0x017F: "s", \
0x018F: "_", \
0x0192: "f", \
0x01C4: "DZ", \
0x01C5: "DZ", \
0x01C6: "DZ", \
0x01C7: "LJ", \
0x01C8: "Lj", \
0x01C9: "lj", \
0x01CA: "NJ", \
0x01CB: "Nj", \
0x01CC: "nj", \
0x01F1: "DZ", \
0x01F2: "Dz", \
0x01F3: "dz", \
0x0218: "S", \
0x0219: "s", \
0x021A: "T", \
0x021B: "t", \
0x0259: "_", \
0x20AC: "EUR" }
def legacyEncode(string):
string2 = ""
for z, char in enumerate(string.decode("utf-8")):
i = ord(char)
if i < 33:
string2 += "_"
elif i in ASCIItranslit:
string2 += ASCIItranslit[i]
else:
try:
string2 += char.encode('ascii', 'strict')
except:
string2 += "_"
return string2.upper()
| gpl-2.0 |
vitorio/bite-project | deps/gdata-python-client/src/gdata/contacts/__init__.py | 119 | 28208 | #!/usr/bin/env python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains extensions to ElementWrapper objects used with Google Contacts."""
__author__ = 'dbrattli (Dag Brattli)'
import atom
import gdata
## Constants from http://code.google.com/apis/gdata/elements.html ##
REL_HOME = 'http://schemas.google.com/g/2005#home'
REL_WORK = 'http://schemas.google.com/g/2005#work'
REL_OTHER = 'http://schemas.google.com/g/2005#other'
# AOL Instant Messenger protocol
IM_AIM = 'http://schemas.google.com/g/2005#AIM'
IM_MSN = 'http://schemas.google.com/g/2005#MSN' # MSN Messenger protocol
IM_YAHOO = 'http://schemas.google.com/g/2005#YAHOO' # Yahoo Messenger protocol
IM_SKYPE = 'http://schemas.google.com/g/2005#SKYPE' # Skype protocol
IM_QQ = 'http://schemas.google.com/g/2005#QQ' # QQ protocol
# Google Talk protocol
IM_GOOGLE_TALK = 'http://schemas.google.com/g/2005#GOOGLE_TALK'
IM_ICQ = 'http://schemas.google.com/g/2005#ICQ' # ICQ protocol
IM_JABBER = 'http://schemas.google.com/g/2005#JABBER' # Jabber protocol
IM_NETMEETING = 'http://schemas.google.com/g/2005#netmeeting' # NetMeeting
PHOTO_LINK_REL = 'http://schemas.google.com/contacts/2008/rel#photo'
PHOTO_EDIT_LINK_REL = 'http://schemas.google.com/contacts/2008/rel#edit-photo'
# Different phone types, for more info see:
# http://code.google.com/apis/gdata/docs/2.0/elements.html#gdPhoneNumber
PHONE_CAR = 'http://schemas.google.com/g/2005#car'
PHONE_FAX = 'http://schemas.google.com/g/2005#fax'
PHONE_GENERAL = 'http://schemas.google.com/g/2005#general'
PHONE_HOME = REL_HOME
PHONE_HOME_FAX = 'http://schemas.google.com/g/2005#home_fax'
PHONE_INTERNAL = 'http://schemas.google.com/g/2005#internal-extension'
PHONE_MOBILE = 'http://schemas.google.com/g/2005#mobile'
PHONE_OTHER = REL_OTHER
PHONE_PAGER = 'http://schemas.google.com/g/2005#pager'
PHONE_SATELLITE = 'http://schemas.google.com/g/2005#satellite'
PHONE_VOIP = 'http://schemas.google.com/g/2005#voip'
PHONE_WORK = REL_WORK
PHONE_WORK_FAX = 'http://schemas.google.com/g/2005#work_fax'
PHONE_WORK_MOBILE = 'http://schemas.google.com/g/2005#work_mobile'
PHONE_WORK_PAGER = 'http://schemas.google.com/g/2005#work_pager'
PHONE_MAIN = 'http://schemas.google.com/g/2005#main'
PHONE_ASSISTANT = 'http://schemas.google.com/g/2005#assistant'
PHONE_CALLBACK = 'http://schemas.google.com/g/2005#callback'
PHONE_COMPANY_MAIN = 'http://schemas.google.com/g/2005#company_main'
PHONE_ISDN = 'http://schemas.google.com/g/2005#isdn'
PHONE_OTHER_FAX = 'http://schemas.google.com/g/2005#other_fax'
PHONE_RADIO = 'http://schemas.google.com/g/2005#radio'
PHONE_TELEX = 'http://schemas.google.com/g/2005#telex'
PHONE_TTY_TDD = 'http://schemas.google.com/g/2005#tty_tdd'
EXTERNAL_ID_ORGANIZATION = 'organization'
RELATION_MANAGER = 'manager'
CONTACTS_NAMESPACE = 'http://schemas.google.com/contact/2008'
class GDataBase(atom.AtomBase):
"""The Google Contacts intermediate class from atom.AtomBase."""
_namespace = gdata.GDATA_NAMESPACE
_children = atom.AtomBase._children.copy()
_attributes = atom.AtomBase._attributes.copy()
def __init__(self, text=None,
extension_elements=None, extension_attributes=None):
self.text = text
self.extension_elements = extension_elements or []
self.extension_attributes = extension_attributes or {}
class ContactsBase(GDataBase):
"""The Google Contacts intermediate class for Contacts namespace."""
_namespace = CONTACTS_NAMESPACE
class OrgName(GDataBase):
"""The Google Contacts OrgName element."""
_tag = 'orgName'
class OrgTitle(GDataBase):
"""The Google Contacts OrgTitle element."""
_tag = 'orgTitle'
class OrgDepartment(GDataBase):
"""The Google Contacts OrgDepartment element."""
_tag = 'orgDepartment'
class OrgJobDescription(GDataBase):
"""The Google Contacts OrgJobDescription element."""
_tag = 'orgJobDescription'
class Where(GDataBase):
"""The Google Contacts Where element."""
_tag = 'where'
_children = GDataBase._children.copy()
_attributes = GDataBase._attributes.copy()
_attributes['rel'] = 'rel'
_attributes['label'] = 'label'
_attributes['valueString'] = 'value_string'
def __init__(self, value_string=None, rel=None, label=None,
text=None, extension_elements=None, extension_attributes=None):
GDataBase.__init__(self, text=text, extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.rel = rel
self.label = label
self.value_string = value_string
class When(GDataBase):
"""The Google Contacts When element."""
_tag = 'when'
_children = GDataBase._children.copy()
_attributes = GDataBase._attributes.copy()
_attributes['startTime'] = 'start_time'
_attributes['endTime'] = 'end_time'
_attributes['label'] = 'label'
def __init__(self, start_time=None, end_time=None, label=None,
text=None, extension_elements=None, extension_attributes=None):
GDataBase.__init__(self, text=text, extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.start_time = start_time
self.end_time = end_time
self.label = label
class Organization(GDataBase):
"""The Google Contacts Organization element."""
_tag = 'organization'
_children = GDataBase._children.copy()
_attributes = GDataBase._attributes.copy()
_attributes['label'] = 'label'
_attributes['rel'] = 'rel'
_attributes['primary'] = 'primary'
_children['{%s}orgName' % GDataBase._namespace] = (
'org_name', OrgName)
_children['{%s}orgTitle' % GDataBase._namespace] = (
'org_title', OrgTitle)
_children['{%s}orgDepartment' % GDataBase._namespace] = (
'org_department', OrgDepartment)
_children['{%s}orgJobDescription' % GDataBase._namespace] = (
'org_job_description', OrgJobDescription)
#_children['{%s}where' % GDataBase._namespace] = ('where', Where)
def __init__(self, label=None, rel=None, primary='false', org_name=None,
org_title=None, org_department=None, org_job_description=None,
where=None, text=None,
extension_elements=None, extension_attributes=None):
GDataBase.__init__(self, text=text, extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.label = label
self.rel = rel or REL_OTHER
self.primary = primary
self.org_name = org_name
self.org_title = org_title
self.org_department = org_department
self.org_job_description = org_job_description
self.where = where
class PostalAddress(GDataBase):
"""The Google Contacts PostalAddress element."""
_tag = 'postalAddress'
_children = GDataBase._children.copy()
_attributes = GDataBase._attributes.copy()
_attributes['rel'] = 'rel'
_attributes['primary'] = 'primary'
def __init__(self, primary=None, rel=None, text=None,
extension_elements=None, extension_attributes=None):
GDataBase.__init__(self, text=text, extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.rel = rel or REL_OTHER
self.primary = primary
class FormattedAddress(GDataBase):
"""The Google Contacts FormattedAddress element."""
_tag = 'formattedAddress'
class StructuredPostalAddress(GDataBase):
"""The Google Contacts StructuredPostalAddress element."""
_tag = 'structuredPostalAddress'
_children = GDataBase._children.copy()
_attributes = GDataBase._attributes.copy()
_attributes['rel'] = 'rel'
_attributes['primary'] = 'primary'
_children['{%s}formattedAddress' % GDataBase._namespace] = (
'formatted_address', FormattedAddress)
def __init__(self, rel=None, primary=None,
formatted_address=None, text=None,
extension_elements=None, extension_attributes=None):
GDataBase.__init__(self, text=text, extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.rel = rel or REL_OTHER
self.primary = primary
self.formatted_address = formatted_address
class IM(GDataBase):
"""The Google Contacts IM element."""
_tag = 'im'
_children = GDataBase._children.copy()
_attributes = GDataBase._attributes.copy()
_attributes['address'] = 'address'
_attributes['primary'] = 'primary'
_attributes['protocol'] = 'protocol'
_attributes['label'] = 'label'
_attributes['rel'] = 'rel'
def __init__(self, primary='false', rel=None, address=None, protocol=None,
label=None, text=None,
extension_elements=None, extension_attributes=None):
GDataBase.__init__(self, text=text, extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.protocol = protocol
self.address = address
self.primary = primary
self.rel = rel or REL_OTHER
self.label = label
class Email(GDataBase):
"""The Google Contacts Email element."""
_tag = 'email'
_children = GDataBase._children.copy()
_attributes = GDataBase._attributes.copy()
_attributes['address'] = 'address'
_attributes['primary'] = 'primary'
_attributes['rel'] = 'rel'
_attributes['label'] = 'label'
def __init__(self, label=None, rel=None, address=None, primary='false',
text=None, extension_elements=None, extension_attributes=None):
GDataBase.__init__(self, text=text, extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.label = label
self.rel = rel or REL_OTHER
self.address = address
self.primary = primary
class PhoneNumber(GDataBase):
"""The Google Contacts PhoneNumber element."""
_tag = 'phoneNumber'
_children = GDataBase._children.copy()
_attributes = GDataBase._attributes.copy()
_attributes['label'] = 'label'
_attributes['rel'] = 'rel'
_attributes['uri'] = 'uri'
_attributes['primary'] = 'primary'
def __init__(self, label=None, rel=None, uri=None, primary='false',
text=None, extension_elements=None, extension_attributes=None):
GDataBase.__init__(self, text=text, extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.label = label
self.rel = rel or REL_OTHER
self.uri = uri
self.primary = primary
class Nickname(ContactsBase):
"""The Google Contacts Nickname element."""
_tag = 'nickname'
class Occupation(ContactsBase):
"""The Google Contacts Occupation element."""
_tag = 'occupation'
class Gender(ContactsBase):
"""The Google Contacts Gender element."""
_tag = 'gender'
_children = ContactsBase._children.copy()
_attributes = ContactsBase._attributes.copy()
_attributes['value'] = 'value'
def __init__(self, value=None,
text=None, extension_elements=None, extension_attributes=None):
ContactsBase.__init__(self, text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.value = value
class Birthday(ContactsBase):
"""The Google Contacts Birthday element."""
_tag = 'birthday'
_children = ContactsBase._children.copy()
_attributes = ContactsBase._attributes.copy()
_attributes['when'] = 'when'
def __init__(self, when=None,
text=None, extension_elements=None, extension_attributes=None):
ContactsBase.__init__(self, text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.when = when
class Relation(ContactsBase):
"""The Google Contacts Relation element."""
_tag = 'relation'
_children = ContactsBase._children.copy()
_attributes = ContactsBase._attributes.copy()
_attributes['label'] = 'label'
_attributes['rel'] = 'rel'
def __init__(self, label=None, rel=None,
text=None, extension_elements=None, extension_attributes=None):
ContactsBase.__init__(self, text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.label = label
self.rel = rel
def RelationFromString(xml_string):
return atom.CreateClassFromXMLString(Relation, xml_string)
class UserDefinedField(ContactsBase):
"""The Google Contacts UserDefinedField element."""
_tag = 'userDefinedField'
_children = ContactsBase._children.copy()
_attributes = ContactsBase._attributes.copy()
_attributes['key'] = 'key'
_attributes['value'] = 'value'
def __init__(self, key=None, value=None,
text=None, extension_elements=None, extension_attributes=None):
ContactsBase.__init__(self, text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.key = key
self.value = value
def UserDefinedFieldFromString(xml_string):
return atom.CreateClassFromXMLString(UserDefinedField, xml_string)
class Website(ContactsBase):
"""The Google Contacts Website element."""
_tag = 'website'
_children = ContactsBase._children.copy()
_attributes = ContactsBase._attributes.copy()
_attributes['href'] = 'href'
_attributes['label'] = 'label'
_attributes['primary'] = 'primary'
_attributes['rel'] = 'rel'
def __init__(self, href=None, label=None, primary='false', rel=None,
text=None, extension_elements=None, extension_attributes=None):
ContactsBase.__init__(self, text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.href = href
self.label = label
self.primary = primary
self.rel = rel
def WebsiteFromString(xml_string):
return atom.CreateClassFromXMLString(Website, xml_string)
class ExternalId(ContactsBase):
"""The Google Contacts ExternalId element."""
_tag = 'externalId'
_children = ContactsBase._children.copy()
_attributes = ContactsBase._attributes.copy()
_attributes['label'] = 'label'
_attributes['rel'] = 'rel'
_attributes['value'] = 'value'
def __init__(self, label=None, rel=None, value=None,
text=None, extension_elements=None, extension_attributes=None):
ContactsBase.__init__(self, text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.label = label
self.rel = rel
self.value = value
def ExternalIdFromString(xml_string):
return atom.CreateClassFromXMLString(ExternalId, xml_string)
class Event(ContactsBase):
"""The Google Contacts Event element."""
_tag = 'event'
_children = ContactsBase._children.copy()
_attributes = ContactsBase._attributes.copy()
_attributes['label'] = 'label'
_attributes['rel'] = 'rel'
_children['{%s}when' % ContactsBase._namespace] = ('when', When)
def __init__(self, label=None, rel=None, when=None,
text=None, extension_elements=None, extension_attributes=None):
ContactsBase.__init__(self, text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.label = label
self.rel = rel
self.when = when
def EventFromString(xml_string):
return atom.CreateClassFromXMLString(Event, xml_string)
class Deleted(GDataBase):
"""The Google Contacts Deleted element."""
_tag = 'deleted'
class GroupMembershipInfo(ContactsBase):
"""The Google Contacts GroupMembershipInfo element."""
_tag = 'groupMembershipInfo'
_children = ContactsBase._children.copy()
_attributes = ContactsBase._attributes.copy()
_attributes['deleted'] = 'deleted'
_attributes['href'] = 'href'
def __init__(self, deleted=None, href=None, text=None,
extension_elements=None, extension_attributes=None):
ContactsBase.__init__(self, text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.deleted = deleted
self.href = href
class PersonEntry(gdata.BatchEntry):
"""Base class for ContactEntry and ProfileEntry."""
_children = gdata.BatchEntry._children.copy()
_children['{%s}organization' % gdata.GDATA_NAMESPACE] = (
'organization', [Organization])
_children['{%s}phoneNumber' % gdata.GDATA_NAMESPACE] = (
'phone_number', [PhoneNumber])
_children['{%s}nickname' % CONTACTS_NAMESPACE] = ('nickname', Nickname)
_children['{%s}occupation' % CONTACTS_NAMESPACE] = ('occupation', Occupation)
_children['{%s}gender' % CONTACTS_NAMESPACE] = ('gender', Gender)
_children['{%s}birthday' % CONTACTS_NAMESPACE] = ('birthday', Birthday)
_children['{%s}postalAddress' % gdata.GDATA_NAMESPACE] = ('postal_address',
[PostalAddress])
_children['{%s}structuredPostalAddress' % gdata.GDATA_NAMESPACE] = (
'structured_postal_address', [StructuredPostalAddress])
_children['{%s}email' % gdata.GDATA_NAMESPACE] = ('email', [Email])
_children['{%s}im' % gdata.GDATA_NAMESPACE] = ('im', [IM])
_children['{%s}relation' % CONTACTS_NAMESPACE] = ('relation', [Relation])
_children['{%s}userDefinedField' % CONTACTS_NAMESPACE] = (
'user_defined_field', [UserDefinedField])
_children['{%s}website' % CONTACTS_NAMESPACE] = ('website', [Website])
_children['{%s}externalId' % CONTACTS_NAMESPACE] = (
'external_id', [ExternalId])
_children['{%s}event' % CONTACTS_NAMESPACE] = ('event', [Event])
# The following line should be removed once the Python support
# for GData 2.0 is mature.
_attributes = gdata.BatchEntry._attributes.copy()
_attributes['{%s}etag' % gdata.GDATA_NAMESPACE] = 'etag'
def __init__(self, author=None, category=None, content=None,
atom_id=None, link=None, published=None,
title=None, updated=None, organization=None, phone_number=None,
nickname=None, occupation=None, gender=None, birthday=None,
postal_address=None, structured_postal_address=None, email=None,
im=None, relation=None, user_defined_field=None, website=None,
external_id=None, event=None, batch_operation=None,
batch_id=None, batch_status=None, text=None,
extension_elements=None, extension_attributes=None, etag=None):
gdata.BatchEntry.__init__(self, author=author, category=category,
content=content, atom_id=atom_id, link=link,
published=published,
batch_operation=batch_operation,
batch_id=batch_id, batch_status=batch_status,
title=title, updated=updated)
self.organization = organization or []
self.phone_number = phone_number or []
self.nickname = nickname
self.occupation = occupation
self.gender = gender
self.birthday = birthday
self.postal_address = postal_address or []
self.structured_postal_address = structured_postal_address or []
self.email = email or []
self.im = im or []
self.relation = relation or []
self.user_defined_field = user_defined_field or []
self.website = website or []
self.external_id = external_id or []
self.event = event or []
self.text = text
self.extension_elements = extension_elements or []
self.extension_attributes = extension_attributes or {}
# The following line should be removed once the Python support
# for GData 2.0 is mature.
self.etag = etag
class ContactEntry(PersonEntry):
"""A Google Contact flavor of an Atom Entry."""
_children = PersonEntry._children.copy()
_children['{%s}deleted' % gdata.GDATA_NAMESPACE] = ('deleted', Deleted)
_children['{%s}groupMembershipInfo' % CONTACTS_NAMESPACE] = (
'group_membership_info', [GroupMembershipInfo])
_children['{%s}extendedProperty' % gdata.GDATA_NAMESPACE] = (
'extended_property', [gdata.ExtendedProperty])
# Overwrite the organization rule in PersonEntry so that a ContactEntry
# may only contain one <gd:organization> element.
_children['{%s}organization' % gdata.GDATA_NAMESPACE] = (
'organization', Organization)
def __init__(self, author=None, category=None, content=None,
atom_id=None, link=None, published=None,
title=None, updated=None, organization=None, phone_number=None,
nickname=None, occupation=None, gender=None, birthday=None,
postal_address=None, structured_postal_address=None, email=None,
im=None, relation=None, user_defined_field=None, website=None,
external_id=None, event=None, batch_operation=None,
batch_id=None, batch_status=None, text=None,
extension_elements=None, extension_attributes=None, etag=None,
deleted=None, extended_property=None,
group_membership_info=None):
PersonEntry.__init__(self, author=author, category=category,
content=content, atom_id=atom_id, link=link,
published=published, title=title, updated=updated,
organization=organization, phone_number=phone_number,
nickname=nickname, occupation=occupation,
gender=gender, birthday=birthday,
postal_address=postal_address,
structured_postal_address=structured_postal_address,
email=email, im=im, relation=relation,
user_defined_field=user_defined_field,
website=website, external_id=external_id, event=event,
batch_operation=batch_operation, batch_id=batch_id,
batch_status=batch_status, text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes, etag=etag)
self.deleted = deleted
self.extended_property = extended_property or []
self.group_membership_info = group_membership_info or []
def GetPhotoLink(self):
for a_link in self.link:
if a_link.rel == PHOTO_LINK_REL:
return a_link
return None
def GetPhotoEditLink(self):
for a_link in self.link:
if a_link.rel == PHOTO_EDIT_LINK_REL:
return a_link
return None
def ContactEntryFromString(xml_string):
return atom.CreateClassFromXMLString(ContactEntry, xml_string)
class ContactsFeed(gdata.BatchFeed, gdata.LinkFinder):
"""A Google Contacts feed flavor of an Atom Feed."""
_children = gdata.BatchFeed._children.copy()
_children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [ContactEntry])
def __init__(self, author=None, category=None, contributor=None,
generator=None, icon=None, atom_id=None, link=None, logo=None,
rights=None, subtitle=None, title=None, updated=None,
entry=None, total_results=None, start_index=None,
items_per_page=None, extension_elements=None,
extension_attributes=None, text=None):
gdata.BatchFeed.__init__(self, author=author, category=category,
contributor=contributor, generator=generator,
icon=icon, atom_id=atom_id, link=link,
logo=logo, rights=rights, subtitle=subtitle,
title=title, updated=updated, entry=entry,
total_results=total_results,
start_index=start_index,
items_per_page=items_per_page,
extension_elements=extension_elements,
extension_attributes=extension_attributes,
text=text)
def ContactsFeedFromString(xml_string):
return atom.CreateClassFromXMLString(ContactsFeed, xml_string)
class GroupEntry(gdata.BatchEntry):
"""Represents a contact group."""
_children = gdata.BatchEntry._children.copy()
_children['{%s}extendedProperty' % gdata.GDATA_NAMESPACE] = (
'extended_property', [gdata.ExtendedProperty])
def __init__(self, author=None, category=None, content=None,
contributor=None, atom_id=None, link=None, published=None,
rights=None, source=None, summary=None, control=None,
title=None, updated=None,
extended_property=None, batch_operation=None, batch_id=None,
batch_status=None,
extension_elements=None, extension_attributes=None, text=None):
gdata.BatchEntry.__init__(self, author=author, category=category,
content=content,
atom_id=atom_id, link=link, published=published,
batch_operation=batch_operation,
batch_id=batch_id, batch_status=batch_status,
title=title, updated=updated)
self.extended_property = extended_property or []
def GroupEntryFromString(xml_string):
return atom.CreateClassFromXMLString(GroupEntry, xml_string)
class GroupsFeed(gdata.BatchFeed):
"""A Google contact groups feed flavor of an Atom Feed."""
_children = gdata.BatchFeed._children.copy()
_children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [GroupEntry])
def GroupsFeedFromString(xml_string):
return atom.CreateClassFromXMLString(GroupsFeed, xml_string)
class ProfileEntry(PersonEntry):
"""A Google Profiles flavor of an Atom Entry."""
def ProfileEntryFromString(xml_string):
"""Converts an XML string into a ProfileEntry object.
Args:
xml_string: string The XML describing a Profile entry.
Returns:
A ProfileEntry object corresponding to the given XML.
"""
return atom.CreateClassFromXMLString(ProfileEntry, xml_string)
class ProfilesFeed(gdata.BatchFeed, gdata.LinkFinder):
"""A Google Profiles feed flavor of an Atom Feed."""
_children = gdata.BatchFeed._children.copy()
_children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [ProfileEntry])
def __init__(self, author=None, category=None, contributor=None,
generator=None, icon=None, atom_id=None, link=None, logo=None,
rights=None, subtitle=None, title=None, updated=None,
entry=None, total_results=None, start_index=None,
items_per_page=None, extension_elements=None,
extension_attributes=None, text=None):
gdata.BatchFeed.__init__(self, author=author, category=category,
contributor=contributor, generator=generator,
icon=icon, atom_id=atom_id, link=link,
logo=logo, rights=rights, subtitle=subtitle,
title=title, updated=updated, entry=entry,
total_results=total_results,
start_index=start_index,
items_per_page=items_per_page,
extension_elements=extension_elements,
extension_attributes=extension_attributes,
text=text)
def ProfilesFeedFromString(xml_string):
"""Converts an XML string into a ProfilesFeed object.
Args:
xml_string: string The XML describing a Profiles feed.
Returns:
A ProfilesFeed object corresponding to the given XML.
"""
return atom.CreateClassFromXMLString(ProfilesFeed, xml_string)
| apache-2.0 |
mozilla/relman-auto-nag | auto_nag/scripts/mismatch_priority_tracking_esr.py | 2 | 1862 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
from auto_nag import utils
from auto_nag.bzcleaner import BzCleaner
class MismatchPrioTrackESR(BzCleaner):
def __init__(self):
super(MismatchPrioTrackESR, self).__init__()
self.init_versions()
def description(self):
return "Bug tracked for esr with a bad priority (P3, P4 or P5)"
def template(self):
return "mismatch_priority_tracking.html"
def ignore_date(self):
return True
def get_bz_params(self, date):
esr_version = self.versions["esr"]
value = ",".join(["---", "affected"])
params = {
"resolution": [
"---",
"FIXED",
"INVALID",
"WONTFIX",
"DUPLICATE",
"WORKSFORME",
"INCOMPLETE",
"SUPPORT",
"EXPIRED",
"MOVED",
],
"priority": ["P3", "P4", "P5"],
"f1": utils.get_flag(esr_version, "tracking", "esr"),
"o1": "anyexact",
"v1": ",".join(["+", "blocking"]),
"f2": utils.get_flag(esr_version, "status", "esr"),
"o2": "anyexact",
"v2": value,
}
return params
def get_autofix_change(self):
return {
"comment": {
"body": "Changing the priority to p1 as the bug is tracked by a release manager for the current esr.\nSee [What Do You Triage](https://firefox-source-docs.mozilla.org/bug-mgmt/guides/priority.html) for more information"
},
"priority": "p1",
}
if __name__ == "__main__":
MismatchPrioTrackESR().run()
| bsd-3-clause |
RobertABT/heightmap | build/scipy/scipy/optimize/nnls.py | 116 | 1423 | from __future__ import division, print_function, absolute_import
from . import _nnls
from numpy import asarray_chkfinite, zeros, double
__all__ = ['nnls']
def nnls(A, b):
"""
Solve ``argmin_x || Ax - b ||_2`` for ``x>=0``. This is a wrapper
    for a FORTRAN non-negative least squares solver.
Parameters
----------
A : ndarray
Matrix ``A`` as shown above.
b : ndarray
Right-hand side vector.
Returns
-------
x : ndarray
Solution vector.
rnorm : float
The residual, ``|| Ax-b ||_2``.
Notes
-----
The FORTRAN code was published in the book below. The algorithm
is an active set method. It solves the KKT (Karush-Kuhn-Tucker)
conditions for the non-negative least squares problem.
References
----------
Lawson C., Hanson R.J., (1987) Solving Least Squares Problems, SIAM
"""
A, b = map(asarray_chkfinite, (A, b))
if len(A.shape) != 2:
raise ValueError("expected matrix")
if len(b.shape) != 1:
raise ValueError("expected vector")
m, n = A.shape
if m != b.shape[0]:
raise ValueError("incompatible dimensions")
w = zeros((n,), dtype=double)
zz = zeros((m,), dtype=double)
index = zeros((n,), dtype=int)
x, rnorm, mode = _nnls.nnls(A, m, n, b, w, zz, index)
if mode != 1:
raise RuntimeError("too many iterations")
return x, rnorm
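# Illustrative usage sketch (editor addition, not part of the original SciPy
# module): a tiny over-determined system solved under the x >= 0 constraint.
# Assumes NumPy and the compiled _nnls extension are available, as in any
# normal SciPy build.
if __name__ == "__main__":
    import numpy as np
    A_demo = np.array([[1.0, 0.0],
                       [1.0, 1.0],
                       [0.0, 2.0]])
    b_demo = np.array([1.0, 2.0, 3.0])
    x_demo, rnorm_demo = nnls(A_demo, b_demo)
    print("non-negative solution x:", x_demo)
    print("residual ||Ax - b||_2:", rnorm_demo)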
| mit |
IlyaSkriblovsky/txmongo | txmongo/collection.py | 2 | 57347 | # Copyright 2009-2015 The TxMongo Developers. All rights reserved.
# Use of this source code is governed by the Apache License that can be
# found in the LICENSE file.
from __future__ import absolute_import, division
import io
import struct
import collections
import warnings
from bson import BSON, ObjectId
from bson.code import Code
from bson.son import SON
from bson.codec_options import CodecOptions
from pymongo.bulk import _Bulk, _COMMANDS, _UOP
from pymongo.errors import InvalidName, BulkWriteError, InvalidOperation, OperationFailure, DuplicateKeyError, \
WriteError, WTimeoutError, WriteConcernError
from pymongo.helpers import _check_command_response
from pymongo.message import _OP_MAP, _INSERT, _DELETE, _UPDATE
from pymongo.results import InsertOneResult, InsertManyResult, UpdateResult, \
DeleteResult, BulkWriteResult
from pymongo.common import validate_ok_for_update, validate_ok_for_replace, \
validate_is_mapping, validate_boolean
from pymongo.collection import ReturnDocument
from pymongo.write_concern import WriteConcern
from txmongo.filter import _QueryFilter
from txmongo.protocol import DELETE_SINGLE_REMOVE, UPDATE_UPSERT, UPDATE_MULTI, \
Query, Getmore, Insert, Update, Delete, KillCursors
from txmongo.utils import check_deadline, timeout
from txmongo import filter as qf
from twisted.internet import defer
from twisted.python.compat import unicode, comparable
# Copied from pymongo/helpers.py:193 at commit 47b0d8ebfd6cefca80c1e4521b47aec7cf8f529d
def _raise_last_write_error(write_errors):
# If the last batch had multiple errors only report
# the last error to emulate continue_on_error.
error = write_errors[-1]
if error.get("code") == 11000:
raise DuplicateKeyError(error.get("errmsg"), 11000, error)
raise WriteError(error.get("errmsg"), error.get("code"), error)
# Copied from pymongo/helpers.py:202 at commit 47b0d8ebfd6cefca80c1e4521b47aec7cf8f529d
def _raise_write_concern_error(error):
if "errInfo" in error and error["errInfo"].get('wtimeout'):
# Make sure we raise WTimeoutError
raise WTimeoutError(
error.get("errmsg"), error.get("code"), error)
raise WriteConcernError(
error.get("errmsg"), error.get("code"), error)
# Copied from pymongo/helpers.py:211 at commit 47b0d8ebfd6cefca80c1e4521b47aec7cf8f529d
def _check_write_command_response(result):
"""Backward compatibility helper for write command error handling.
"""
# Prefer write errors over write concern errors
write_errors = result.get("writeErrors")
if write_errors:
_raise_last_write_error(write_errors)
error = result.get("writeConcernError")
if error:
_raise_write_concern_error(error)
# Copied from pymongo/bulk.py:93 at commit 96aaf2f5279fb9eee5d0c1a2ce53d243b2772eee
def _merge_command(run, full_result, results):
"""Merge a group of results from write commands into the full result.
"""
for offset, result in results:
affected = result.get("n", 0)
if run.op_type == _INSERT:
full_result["nInserted"] += affected
elif run.op_type == _DELETE:
full_result["nRemoved"] += affected
elif run.op_type == _UPDATE:
upserted = result.get("upserted")
if upserted:
n_upserted = len(upserted)
for doc in upserted:
doc["index"] = run.index(doc["index"] + offset)
full_result["upserted"].extend(upserted)
full_result["nUpserted"] += n_upserted
full_result["nMatched"] += (affected - n_upserted)
else:
full_result["nMatched"] += affected
full_result["nModified"] += result["nModified"]
write_errors = result.get("writeErrors")
if write_errors:
for doc in write_errors:
# Leave the server response intact for APM.
replacement = doc.copy()
idx = doc["index"] + offset
replacement["index"] = run.index(idx)
# Add the failed operation to the error document.
replacement[_UOP] = run.ops[idx]
full_result["writeErrors"].append(replacement)
wc_error = result.get("writeConcernError")
if wc_error:
full_result["writeConcernErrors"].append(wc_error)
@comparable
class Collection(object):
"""Creates new :class:`Collection` object
:param database:
the :class:`Database` instance to get collection from
:param name:
the name of the collection to get
:param write_concern:
An instance of :class:`~pymongo.write_concern.WriteConcern`.
If ``None``, ``database.write_concern`` is used.
:param codec_options:
An instance of :class:`~bson.codec_options.CodecOptions`.
If ``None``, ``database.codec_options`` is used.
"""
def __init__(self, database, name, write_concern=None, codec_options=None):
if not isinstance(name, (bytes, unicode)):
raise TypeError("TxMongo: name must be an instance of (bytes, unicode).")
if not name or ".." in name:
raise InvalidName("TxMongo: collection names cannot be empty.")
if "$" in name and not (name.startswith("oplog.$main") or
name.startswith("$cmd")):
msg = "TxMongo: collection names must not contain '$', '{0}'".format(repr(name))
raise InvalidName(msg)
if name[0] == "." or name[-1] == ".":
msg = "TxMongo: collection names must not start or end with '.', '{0}'".format(repr(name))
raise InvalidName(msg)
if "\x00" in name:
raise InvalidName("TxMongo: collection names must not contain the null character.")
self._database = database
self._collection_name = unicode(name)
self.__write_concern = write_concern
self.__codec_options = codec_options
def __str__(self):
return "%s.%s" % (str(self._database), self._collection_name)
def __repr__(self):
return "Collection(%s, %s)" % (self._database, self._collection_name)
@property
def full_name(self):
"""Full name of this :class:`Collection`, i.e.
`db_name.collection_name`"""
return '{0}.{1}'.format(str(self._database), self._collection_name)
@property
def name(self):
"""Name of this :class:`Collection` (without database name)."""
return self._collection_name
@property
def database(self):
"""The :class:`~txmongo.database.Database` that this :class:`Collection`
is a part of."""
return self._database
def __getitem__(self, collection_name):
"""Get a sub-collection of this collection by name."""
return Collection(self._database,
"%s.%s" % (self._collection_name, collection_name))
def __cmp__(self, other):
if isinstance(other, Collection):
def cmp(a, b):
return (a > b) - (a < b)
return cmp((self._database, self._collection_name),
(other._database, other._collection_name))
return NotImplemented
def __getattr__(self, collection_name):
"""Get a sub-collection of this collection by name."""
return self[collection_name]
def __call__(self, collection_name):
"""Get a sub-collection of this collection by name."""
return self[collection_name]
@property
def codec_options(self):
"""Read only access to the :class:`~bson.codec_options.CodecOptions`
of this instance.
Use ``coll.with_options(codec_options=CodecOptions(...))`` to change
codec options.
"""
return self.__codec_options or self._database.codec_options
@property
def write_concern(self):
"""Read only access to the :class:`~pymongo.write_concern.WriteConcern`
of this instance.
Use ``coll.with_options(write_concern=WriteConcern(...))`` to change
the Write Concern.
"""
return self.__write_concern or self._database.write_concern
def with_options(self, **kwargs):
"""with_options(*, write_concern=None, codec_options=None)
Get a clone of collection changing the specified settings.
:param write_concern: *(keyword only)*
new :class:`~pymongo.write_concern.WriteConcern` to use.
:param codec_options: *(keyword only)*
new :class:`~bson.codec_options.CodecOptions` to use.
"""
# PyMongo's method gets several positional arguments. We support
# only write_concern for now which is the 3rd positional argument.
# So we are using **kwargs here to force user's code to specify
# write_concern as named argument, so adding other args in future
# won't break compatibility
write_concern = kwargs.get("write_concern") or self.__write_concern
codec_options = kwargs.get("codec_options") or self.codec_options
return Collection(self._database, self._collection_name,
write_concern=write_concern,
codec_options=codec_options)
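    # Illustrative sketch (editor addition, not part of the original module):
    # cloning a collection with a different write concern. ``coll`` is an
    # assumed, pre-existing Collection instance.
    #
    #     majority_coll = coll.with_options(
    #         write_concern=WriteConcern(w="majority", wtimeout=5000))
    #     yield majority_coll.insert_one({"x": 1})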
@staticmethod
def _normalize_fields_projection(fields):
"""
transform a list of fields from ["a", "b"] to {"a":1, "b":1}
"""
if fields is None:
return None
if isinstance(fields, dict):
return fields
# Consider fields as iterable
as_dict = {}
for field in fields:
if not isinstance(field, (bytes, unicode)):
raise TypeError("TxMongo: fields must be a list of key names.")
as_dict[field] = 1
if not as_dict:
# Empty list should be treated as "_id only"
as_dict = {"_id": 1}
return as_dict
@staticmethod
def _gen_index_name(keys):
return u'_'.join([u"%s_%s" % item for item in keys])
def _list_collections_3_0(self):
def on_ok(response):
assert response["cursor"]["id"] == 0
first_batch = response["cursor"]["firstBatch"]
if first_batch:
return first_batch[0]
else:
return None
return self._database.command(
SON([("listCollections", 1),
("filter", {"name": self.name})])).addCallback(on_ok)
@timeout
def options(self, _deadline=None):
"""options()
Get the options set on this collection.
:returns:
:class:`Deferred` that called back with dictionary of options
and their values or with empty dict if collection doesn't exist.
"""
def on_3_0_fail(failure):
failure.trap(OperationFailure)
return self._database.system.namespaces.find_one({"name": str(self)}, _deadline=_deadline)
def on_ok(result):
if not result:
result = {}
options = result.get("options", {})
if "create" in options:
del options["create"]
return options
return self._list_collections_3_0()\
.addErrback(on_3_0_fail)\
.addCallbacks(on_ok)
@staticmethod
def _find_args_compat(*args, **kwargs):
"""
signature of find() was changed from
(spec=None, skip=0, limit=0, fields=None, filter=None, cursor=False, **kwargs)
to
(filter=None, projection=None, skip=0, limit=0, sort=None, **kwargs)
This function makes it compatible with both
"""
def old(spec=None, skip=0, limit=0, fields=None, filter=None, cursor=False, **kwargs):
warnings.warn("find(), find_with_cursor() and find_one() signatures have "
"changed. Please refer to documentation.", DeprecationWarning)
return new(spec, fields, skip, limit, filter, cursor=cursor, **kwargs)
def new(filter=None, projection=None, skip=0, limit=0, sort=None, **kwargs):
args = {"filter": filter, "projection": projection, "skip": skip, "limit": limit,
"sort": sort}
args.update(kwargs)
return args
old_if = (
"fields" in kwargs,
"spec" in kwargs,
len(args) == 0 and isinstance(kwargs.get("filter"), _QueryFilter),
len(args) >= 1 and "filter" in kwargs,
len(args) >= 2 and isinstance(args[1], int),
)
if any(old_if):
return old(*args, **kwargs)
else:
return new(*args, **kwargs)
@timeout
def find(self, *args, **kwargs):
"""find(filter=None, projection=None, skip=0, limit=0, sort=None, **kwargs)
Find documents in a collection.
Ordering, indexing hints and other query parameters can be set with
`sort` argument. See :mod:`txmongo.filter` for details.
:param filter:
MongoDB query document. To return all documents in a collection,
omit this parameter or pass an empty document (``{}``). You can pass
``{"key": "value"}`` to select documents having ``key`` field
equal to ``"value"`` or use any of `MongoDB's query selectors
<https://docs.mongodb.org/manual/reference/operator/query/#query-selectors>`_.
:param projection:
a list of field names that should be returned for each document
in the result set or a dict specifying field names to include or
exclude. If `projection` is a list ``_id`` fields will always be
returned. Use a dict form to exclude fields:
``projection={"_id": False}``.
:param skip:
the number of documents to omit from the start of the result set.
:param limit:
the maximum number of documents to return. All documents are
returned when `limit` is zero.
:param sort:
query filter. You can specify ordering, indexing hints and other query
parameters with this argument. See :mod:`txmongo.filter` for details.
:returns: an instance of :class:`Deferred` that called back with a list with
all documents found.
"""
new_kwargs = self._find_args_compat(*args, **kwargs)
return self.__real_find(**new_kwargs)
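    # Illustrative sketch (editor addition): a typical find() call from
    # inlineCallbacks code. ``coll`` is an assumed, pre-existing Collection;
    # ``qf`` is txmongo.filter, imported above.
    #
    #     docs = yield coll.find({"size": {"$gt": 10}},
    #                            projection={"_id": False, "name": True},
    #                            skip=0, limit=50,
    #                            sort=qf.sort(qf.ASCENDING("name")))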
def __real_find(self, filter=None, projection=None, skip=0, limit=0, sort=None, **kwargs):
cursor = kwargs.pop("cursor", False)
rows = []
def on_ok(result, this_func):
docs, dfr = result
if cursor:
warnings.warn("find() with cursor=True is deprecated. Please use"
"find_with_cursor() instead.", DeprecationWarning)
return docs, dfr
if docs:
rows.extend(docs)
return dfr.addCallback(this_func, this_func)
else:
return rows
return self.__real_find_with_cursor(filter, projection, skip, limit, sort,
**kwargs).addCallback(on_ok, on_ok)
@staticmethod
def __apply_find_filter(spec, c_filter):
if c_filter:
if "query" not in spec:
spec = {"$query": spec}
for k, v in c_filter.items():
if isinstance(v, (list, tuple)):
spec['$' + k] = SON(v)
else:
spec['$' + k] = v
return spec
@timeout
def find_with_cursor(self, *args, **kwargs):
"""find_with_cursor(filter=None, projection=None, skip=0, limit=0, sort=None, batch_size=0, **kwargs)
Find documents in a collection and return them in one batch at a time.
Arguments are the same as for :meth:`find()`.
:returns: an instance of :class:`Deferred` that fires with tuple of ``(docs, dfr)``,
where ``docs`` is a partial result, returned by MongoDB in a first batch and
``dfr`` is a :class:`Deferred` that fires with next ``(docs, dfr)``. Last result
will be ``([], None)``. You can iterate over the result set with code like that:
::
@defer.inlineCallbacks
def query():
                    docs, dfr = yield coll.find_with_cursor(query)
while docs:
for doc in docs:
do_something(doc)
docs, dfr = yield dfr
"""
new_kwargs = self._find_args_compat(*args, **kwargs)
return self.__real_find_with_cursor(**new_kwargs)
    def __real_find_with_cursor(self, filter=None, projection=None, skip=0, limit=0, sort=None, batch_size=0, **kwargs):
if filter is None:
filter = SON()
if not isinstance(filter, dict):
raise TypeError("TxMongo: filter must be an instance of dict.")
if not isinstance(projection, (dict, list)) and projection is not None:
raise TypeError("TxMongo: projection must be an instance of dict or list.")
if not isinstance(skip, int):
raise TypeError("TxMongo: skip must be an instance of int.")
if not isinstance(limit, int):
raise TypeError("TxMongo: limit must be an instance of int.")
if not isinstance(batch_size, int):
raise TypeError("TxMongo: batch_size must be an instance of int.")
projection = self._normalize_fields_projection(projection)
filter = self.__apply_find_filter(filter, sort)
as_class = kwargs.get("as_class")
proto = self._database.connection.getprotocol()
def after_connection(protocol):
flags = kwargs.get("flags", 0)
check_deadline(kwargs.pop("_deadline", None))
if batch_size and limit:
                n_to_return = min(batch_size, limit)
elif batch_size:
n_to_return = batch_size
else:
n_to_return = limit
query = Query(flags=flags, collection=str(self),
n_to_skip=skip, n_to_return=n_to_return,
query=filter, fields=projection)
deferred_query = protocol.send_QUERY(query)
deferred_query.addCallback(after_reply, protocol, after_reply)
return deferred_query
# this_func argument is just a reference to after_reply function itself.
# after_reply can reference to itself directly but this will create a circular
# reference between closure and function object which will add unnecessary
# work for GC.
def after_reply(reply, protocol, this_func, fetched=0):
documents = reply.documents
docs_count = len(documents)
if limit > 0:
docs_count = min(docs_count, limit - fetched)
fetched += docs_count
options = self.codec_options
if as_class is not None:
options = options._replace(document_class=as_class)
out = [document.decode(codec_options=options) for document in documents[:docs_count]]
if reply.cursor_id:
# please note that this will not be the case if batch_size = 1
# it is documented (parameter numberToReturn for OP_QUERY)
# https://docs.mongodb.com/manual/reference/mongodb-wire-protocol/#wire-op-query
if limit == 0:
to_fetch = 0 # no limit
if batch_size:
to_fetch = batch_size
elif limit < 0:
# We won't actually get here because MongoDB won't
# create cursor when limit < 0
to_fetch = None
else:
to_fetch = limit - fetched
if to_fetch <= 0:
to_fetch = None # close cursor
elif batch_size:
                    to_fetch = min(batch_size, to_fetch)
if to_fetch is None:
protocol.send_KILL_CURSORS(KillCursors(cursors=[reply.cursor_id]))
return out, defer.succeed(([], None))
next_reply = protocol.send_GETMORE(Getmore(
collection=str(self), cursor_id=reply.cursor_id,
n_to_return=to_fetch
))
next_reply.addCallback(this_func, protocol, this_func, fetched)
return out, next_reply
return out, defer.succeed(([], None))
proto.addCallback(after_connection)
return proto
@timeout
def find_one(self, *args, **kwargs):
"""find_one(filter=None, projection=None, **kwargs)
Get a single document from the collection.
All arguments to :meth:`find()` are also valid for :meth:`find_one()`,
although `limit` will be ignored.
:returns:
a :class:`Deferred` that called back with single document
or ``None`` if no matching documents is found.
"""
new_kwargs = self._find_args_compat(*args, **kwargs)
if isinstance(new_kwargs["filter"], ObjectId):
new_kwargs["filter"] = {"_id": new_kwargs["filter"]}
new_kwargs["limit"] = 1
return self.__real_find(**new_kwargs)\
.addCallback(lambda result: result[0] if result else None)
@timeout
def count(self, filter=None, **kwargs):
"""Get the number of documents in this collection.
:param filter:
argument is a query document that selects which documents to
count in the collection.
:param hint: *(keyword only)*
:class:`~txmongo.filter.hint` instance specifying index to use.
:param int limit: *(keyword only)*
The maximum number of documents to count.
:param int skip: *(keyword only)*
The number of matching documents to skip before returning results.
:returns: a :class:`Deferred` that called back with a number of
documents matching the criteria.
"""
if "spec" in kwargs:
filter = kwargs["spec"]
if "hint" in kwargs:
hint = kwargs["hint"]
if not isinstance(hint, qf.hint):
raise TypeError("hint must be an instance of txmongo.filter.hint")
kwargs["hint"] = SON(kwargs["hint"]["hint"])
return self._database.command("count", self._collection_name,
query=filter or SON(), **kwargs)\
.addCallback(lambda result: int(result['n']))
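    # Illustrative sketch (editor addition): counting matching documents,
    # optionally forcing an index via txmongo.filter.hint. ``coll`` is an
    # assumed, pre-existing Collection.
    #
    #     n = yield coll.count({"status": "active"}, skip=10)
    #     n_hinted = yield coll.count({"status": "active"},
    #                                 hint=qf.hint(qf.ASCENDING("status")))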
@timeout
def group(self, keys, initial, reduce, condition=None, finalize=None, **kwargs):
body = {
"ns": self._collection_name,
"initial": initial,
"$reduce": Code(reduce),
}
if isinstance(keys, (bytes, unicode)):
body["$keyf"] = Code(keys)
else:
body["key"] = self._normalize_fields_projection(keys)
if condition:
body["cond"] = condition
if finalize:
body["finalize"] = Code(finalize)
return self._database.command("group", body, **kwargs)
@timeout
def filemd5(self, spec, **kwargs):
if not isinstance(spec, ObjectId):
raise ValueError("TxMongo: filemd5 expected an objectid for its non-keyword argument.")
return self._database.command("filemd5", spec, root=self._collection_name, **kwargs)\
.addCallback(lambda result: result.get("md5"))
def _get_write_concern(self, safe=None, **options):
from_opts = WriteConcern(options.get("w"),
options.get("wtimeout"),
options.get("j"),
options.get("fsync"))
if from_opts.document:
return from_opts
if safe is None:
return self.write_concern
elif safe:
if self.write_concern.acknowledged:
return self.write_concern
else:
# Edge case: MongoConnection(w=0).db.coll.insert(..., safe=True)
# In this case safe=True must issue getLastError without args
# even if connection-level write concern was unacknowledged
return WriteConcern()
return WriteConcern(w=0)
@timeout
def insert(self, docs, safe=None, flags=0, **kwargs):
"""Insert a document(s) into this collection.
*Please consider using new-style* :meth:`insert_one()` *or*
:meth:`insert_many()` *methods instead.*
If document doesn't have ``"_id"`` field, :meth:`insert()` will generate
new :class:`~bson.ObjectId` and set it to ``"_id"`` field of the document.
:param docs:
Document or a list of documents to insert into a collection.
:param safe:
``True`` or ``False`` forces usage of respectively acknowledged or
unacknowledged Write Concern. If ``None``, :attr:`write_concern` is
used.
:param flags:
If zero (default), inserting will stop after the first error
            encountered. When ``flags`` is set to
:const:`txmongo.protocol.INSERT_CONTINUE_ON_ERROR`, MongoDB will
try to insert all documents passed even if inserting some of
            them will fail (for example, because of duplicate ``_id``). Note
            that :meth:`insert()` won't raise any errors when this flag is
used.
:returns:
:class:`Deferred` that fires with single ``_id`` field or a list of
``_id`` fields of inserted documents.
"""
if isinstance(docs, dict):
ids = docs.get("_id", ObjectId())
docs["_id"] = ids
docs = [docs]
elif isinstance(docs, list):
ids = []
for doc in docs:
if isinstance(doc, dict):
oid = doc.get("_id", ObjectId())
ids.append(oid)
doc["_id"] = oid
else:
raise TypeError("TxMongo: insert takes a document or a list of documents.")
else:
raise TypeError("TxMongo: insert takes a document or a list of documents.")
docs = [BSON.encode(d) for d in docs]
insert = Insert(flags=flags, collection=str(self), documents=docs)
def on_proto(proto):
check_deadline(kwargs.pop("_deadline", None))
proto.send_INSERT(insert)
write_concern = self._get_write_concern(safe, **kwargs)
if write_concern.acknowledged:
return proto.get_last_error(str(self._database), **write_concern.document)\
.addCallback(lambda _: ids)
return ids
return self._database.connection.getprotocol().addCallback(on_proto)
def _insert_one(self, document, _deadline):
if self.write_concern.acknowledged:
command = SON([("insert", self._collection_name),
("documents", [document]),
("ordered", True),
("writeConcern", self.write_concern.document)])
return self._database.command(command, _deadline=_deadline)
else:
# falling back to OP_INSERT in case of unacknowledged op
return self.insert([document], _deadline=_deadline)\
.addCallback(lambda _: None)
@timeout
def insert_one(self, document, _deadline=None):
"""insert_one(document)
Insert a single document into collection
:param document: Document to insert
:returns:
:class:`Deferred` that called back with
:class:`pymongo.results.InsertOneResult`
"""
if "_id" not in document:
document["_id"] = ObjectId()
inserted_id = document["_id"]
def on_ok(result):
response = result
if response:
_check_write_command_response(response)
return InsertOneResult(inserted_id, self.write_concern.acknowledged)
return self._insert_one(document, _deadline).addCallback(on_ok)
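    # Illustrative sketch (editor addition): inserting one document and reading
    # back the generated _id. ``coll`` is an assumed, pre-existing Collection.
    #
    #     result = yield coll.insert_one({"name": "alice", "score": 10})
    #     print(result.inserted_id, result.acknowledged)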
@staticmethod
def _generate_batch_commands(collname, command, docs_field, documents, ordered,
write_concern, max_bson, max_count):
# Takes a list of documents and generates one or many `insert` commands
# with documents list in each command is less or equal to max_bson bytes
# and contains less or equal documents than max_count
# Manually composing command in BSON form because this way we can
# perform costly documents serialization only once
msg = SON([(command, collname),
("ordered", ordered),
("writeConcern", write_concern.document)])
buf = io.BytesIO()
buf.write(BSON.encode(msg))
buf.seek(-1, io.SEEK_END) # -1 because we don't need final NUL from partial command
buf.write(docs_field) # type, name and length placeholder of 'documents' array
docs_start = buf.tell() - 4
def prepare_command():
docs_end = buf.tell() + 1 # +1 for final NUL for 'documents'
buf.write(b'\x00\x00') # final NULs for 'documents' and the command itself
total_length = buf.tell()
# writing 'documents' length
buf.seek(docs_start)
buf.write(struct.pack('<i', docs_end - docs_start))
# writing total message length
buf.seek(0)
buf.write(struct.pack('<i', total_length))
return BSON(buf.getvalue())
idx = 0
idx_offset = 0
for doc in documents:
key = str(idx).encode('ascii')
value = BSON.encode(doc)
            enough_size = buf.tell() + len(key) + 2 + len(value) - docs_start > max_bson
enough_count = idx >= max_count
if enough_size or enough_count:
yield idx_offset, prepare_command()
buf.seek(docs_start + 4)
buf.truncate()
idx_offset += idx
idx = 0
key = b'0'
buf.write(b'\x03' + key + b'\x00') # type and key of document
buf.write(value)
idx += 1
yield idx_offset, prepare_command()
@timeout
def insert_many(self, documents, ordered=True, _deadline=None):
"""insert_many(documents, ordered=True)
Insert an iterable of documents into collection
:param documents:
An iterable of documents to insert (``list``,
``tuple``, ...)
:param ordered:
If ``True`` (the default) documents will be inserted on the server
serially, in the order provided. If an error occurs, all remaining
inserts are aborted. If ``False``, documents will be inserted on
the server in arbitrary order, possibly in parallel, and all
document inserts will be attempted.
:returns:
:class:`Deferred` that called back with
:class:`pymongo.results.InsertManyResult`
"""
inserted_ids = []
for doc in documents:
if isinstance(doc, collections.Mapping):
inserted_ids.append(doc.setdefault("_id", ObjectId()))
else:
raise TypeError("TxMongo: insert_many takes list of documents.")
bulk = _Bulk(self, ordered, bypass_document_validation=False)
bulk.ops = [(_INSERT, doc) for doc in documents]
result = InsertManyResult(inserted_ids, self.write_concern.acknowledged)
return self._execute_bulk(bulk).addCallback(lambda _: result)
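    # Illustrative sketch (editor addition): unordered bulk insert, so a single
    # duplicate key error does not abort the remaining inserts. ``coll`` is an
    # assumed, pre-existing Collection.
    #
    #     result = yield coll.insert_many([{"i": i} for i in range(100)],
    #                                     ordered=False)
    #     print(len(result.inserted_ids))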
@timeout
def update(self, spec, document, upsert=False, multi=False, safe=None, flags=0, **kwargs):
"""Update document(s) in this collection
*Please consider using new-style* :meth:`update_one()`, :meth:`update_many()`
and :meth:`replace_one()` *methods instead.*
:raises TypeError:
if `spec` or `document` are not instances of `dict`
or `upsert` is not an instance of `bool`.
:param spec:
query document that selects documents to be updated
:param document:
update document to be used for updating or upserting. See
`MongoDB Update docs
<https://docs.mongodb.org/manual/tutorial/modify-documents/>`_
for the format of this document and allowed operators.
:param upsert:
perform an upsert if ``True``
:param multi:
update all documents that match `spec`, rather than just the first
matching document. The default value is ``False``.
:param safe:
``True`` or ``False`` forces usage of respectively acknowledged or
unacknowledged Write Concern. If ``None``, :attr:`write_concern` is
used.
:returns:
:class:`Deferred` that is called back when request is sent to
MongoDB or confirmed by MongoDB (depending on selected Write Concern).
"""
if not isinstance(spec, dict):
raise TypeError("TxMongo: spec must be an instance of dict.")
if not isinstance(document, dict):
raise TypeError("TxMongo: document must be an instance of dict.")
if not isinstance(upsert, bool):
raise TypeError("TxMongo: upsert must be an instance of bool.")
if multi:
flags |= UPDATE_MULTI
if upsert:
flags |= UPDATE_UPSERT
spec = BSON.encode(spec)
document = BSON.encode(document)
update = Update(flags=flags, collection=str(self),
selector=spec, update=document)
def on_proto(proto):
check_deadline(kwargs.pop("_deadline", None))
proto.send_UPDATE(update)
write_concern = self._get_write_concern(safe, **kwargs)
if write_concern.acknowledged:
return proto.get_last_error(str(self._database), **write_concern.document)
return self._database.connection.getprotocol().addCallback(on_proto)
def _update(self, filter, update, upsert, multi, _deadline):
validate_is_mapping("filter", filter)
validate_boolean("upsert", upsert)
if self.write_concern.acknowledged:
updates = [SON([('q', filter), ('u', update),
("upsert", upsert), ("multi", multi)])]
command = SON([("update", self._collection_name),
("updates", updates),
("writeConcern", self.write_concern.document)])
def on_ok(raw_response):
_check_write_command_response(raw_response)
# Extract upserted_id from returned array
if raw_response.get("upserted"):
raw_response["upserted"] = raw_response["upserted"][0]["_id"]
return raw_response
return self._database.command(command, _deadline=_deadline).addCallback(on_ok)
else:
return self.update(filter, update, upsert=upsert, multi=multi,
_deadline=_deadline).addCallback(lambda _: None)
@timeout
def update_one(self, filter, update, upsert=False, _deadline=None):
"""update_one(filter, update, upsert=False)
Update a single document matching the filter.
:raises ValueError:
if `update` document is empty.
:raises ValueError:
            if `update` document has any fields that don't start with a `$` sign.
This method only allows *modification* of document (with `$set`,
`$inc`, etc.), not *replacing* it. For replacing use
:meth:`replace_one()` instead.
:param filter:
A query that matches the document to update.
:param update:
update document to be used for updating or upserting. See `MongoDB
Update docs <https://docs.mongodb.org/manual/tutorial/modify-documents/>`_
for allowed operators.
:param upsert:
If ``True``, perform an insert if no documents match the `filter`.
:returns:
deferred instance of :class:`pymongo.results.UpdateResult`.
"""
validate_ok_for_update(update)
def on_ok(raw_response):
return UpdateResult(raw_response, self.write_concern.acknowledged)
return self._update(filter, update, upsert, False, _deadline).addCallback(on_ok)
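    # Sketch for update_one(); only $-operators are allowed in the update
    # document (identifiers below are assumptions):
    #
    #     def on_result(result):  # pymongo.results.UpdateResult
    #         print(result.raw_result)
    #     collection.update_one({"_id": some_id},
    #                           {"$inc": {"visits": 1}}).addCallback(on_result)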
@timeout
def update_many(self, filter, update, upsert=False, _deadline=None):
"""update_many(filter, update, upsert=False)
Update one or more documents that match the filter.
:raises ValueError:
if `update` document is empty.
:raises ValueError:
            if `update` document has fields that don't start with a `$` sign.
This method only allows *modification* of document (with `$set`,
`$inc`, etc.), not *replacing* it. For replacing use
:meth:`replace_one()` instead.
:param filter:
A query that matches the documents to update.
:param update:
update document to be used for updating or upserting. See `MongoDB
Update docs <https://docs.mongodb.org/manual/tutorial/modify-documents/>`_
for allowed operators.
:param upsert:
If ``True``, perform an insert if no documents match the `filter`.
:returns:
deferred instance of :class:`pymongo.results.UpdateResult`.
"""
validate_ok_for_update(update)
def on_ok(raw_response):
return UpdateResult(raw_response, self.write_concern.acknowledged)
return self._update(filter, update, upsert, True, _deadline).addCallback(on_ok)
@timeout
def replace_one(self, filter, replacement, upsert=False, _deadline=None):
"""replace_one(filter, replacement, upsert=False)
Replace a single document matching the filter.
        :raises ValueError:
            if `replacement` document is empty
        :raises ValueError:
            if `replacement` document has fields that start with a `$` sign.
This method only allows *replacing* document completely. Use
:meth:`update_one()` for modifying existing document.
:param filter:
A query that matches the document to replace.
:param replacement:
The new document to replace with.
:param upsert:
If ``True``, perform an insert if no documents match the filter.
:returns:
deferred instance of :class:`pymongo.results.UpdateResult`.
"""
validate_ok_for_replace(replacement)
def on_ok(raw_response):
return UpdateResult(raw_response, self.write_concern.acknowledged)
return self._update(filter, replacement, upsert, False, _deadline).addCallback(on_ok)
@timeout
def save(self, doc, safe=None, **kwargs):
if not isinstance(doc, dict):
raise TypeError("TxMongo: cannot save objects of type {0}".format(type(doc)))
oid = doc.get("_id")
if oid:
return self.update({"_id": oid}, doc, safe=safe, upsert=True, **kwargs)
else:
return self.insert(doc, safe=safe, **kwargs)
@timeout
def remove(self, spec, safe=None, single=False, flags=0, **kwargs):
if isinstance(spec, ObjectId):
spec = SON(dict(_id=spec))
if not isinstance(spec, dict):
raise TypeError("TxMongo: spec must be an instance of dict, not {0}".format(type(spec)))
if single:
flags |= DELETE_SINGLE_REMOVE
spec = BSON.encode(spec)
delete = Delete(flags=flags, collection=str(self), selector=spec)
def on_proto(proto):
check_deadline(kwargs.pop("_deadline", None))
proto.send_DELETE(delete)
write_concern = self._get_write_concern(safe, **kwargs)
if write_concern.acknowledged:
return proto.get_last_error(str(self._database), **write_concern.document)
return self._database.connection.getprotocol().addCallback(on_proto)
def _delete(self, filter, multi, _deadline):
validate_is_mapping("filter", filter)
if self.write_concern.acknowledged:
deletes = [SON([('q', filter), ("limit", 0 if multi else 1)])]
command = SON([("delete", self._collection_name),
("deletes", deletes),
("writeConcern", self.write_concern.document)])
def on_ok(raw_response):
_check_write_command_response(raw_response)
return raw_response
return self._database.command(command, _deadline=_deadline).addCallback(on_ok)
else:
return self.remove(filter, single=not multi, _deadline=_deadline)\
.addCallback(lambda _: None)
@timeout
def delete_one(self, filter, _deadline=None):
"""delete_one(filter)"""
def on_ok(raw_response):
return DeleteResult(raw_response, self.write_concern.acknowledged)
return self._delete(filter, False, _deadline).addCallback(on_ok)
@timeout
def delete_many(self, filter, _deadline=None):
"""delete_many(filter)"""
def on_ok(raw_response):
return DeleteResult(raw_response, self.write_concern.acknowledged)
return self._delete(filter, True, _deadline).addCallback(on_ok)
@timeout
def drop(self, _deadline=None):
"""drop()"""
return self._database.drop_collection(self._collection_name, _deadline=_deadline)
def create_index(self, sort_fields, **kwargs):
if not isinstance(sort_fields, qf.sort):
raise TypeError("TxMongo: sort_fields must be an instance of filter.sort")
if "name" not in kwargs:
name = self._gen_index_name(sort_fields["orderby"])
else:
name = kwargs.pop("name")
key = SON()
for k, v in sort_fields["orderby"]:
key[k] = v
index = {"name": name, "key": key}
if "drop_dups" in kwargs:
index["dropDups"] = kwargs.pop("drop_dups")
if "bucket_size" in kwargs:
index["bucketSize"] = kwargs.pop("bucket_size")
index.update(kwargs)
return self._database.command("createIndexes", self._collection_name, indexes=[index])\
.addCallback(lambda _: name)
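    # Possible create_index() usage with a txmongo filter.sort instance
    # (index field and options are assumptions for illustration):
    #
    #     from txmongo import filter as qf
    #     collection.create_index(qf.sort(qf.ASCENDING("email")), unique=True)\
    #         .addCallback(lambda name: print("created index", name))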
@timeout
def ensure_index(self, sort_fields, _deadline=None, **kwargs):
# ensure_index is an alias of create_index since we are not
# keeping an index cache same way pymongo does
return self.create_index(sort_fields, **kwargs)
@timeout
def drop_index(self, index_identifier, _deadline=None):
"""drop_index(index_identifier)"""
if isinstance(index_identifier, (bytes, unicode)):
name = index_identifier
elif isinstance(index_identifier, qf.sort):
name = self._gen_index_name(index_identifier["orderby"])
else:
raise TypeError("TxMongo: index_identifier must be a name or instance of filter.sort")
return self._database.command("deleteIndexes", self._collection_name,
index=name, allowable_errors=["ns not found"],
_deadline=_deadline)
@timeout
def drop_indexes(self, _deadline=None):
"""drop_indexes()"""
return self.drop_index("*", _deadline=_deadline)
def __index_information_3_0(self):
def on_ok(indexes_info):
assert indexes_info["cursor"]["id"] == 0
return indexes_info["cursor"]["firstBatch"]
codec = CodecOptions(document_class=SON)
return self._database.command("listIndexes", self.name, codec_options=codec)\
.addCallback(on_ok)
@timeout
def index_information(self, _deadline=None):
"""index_information()"""
def on_3_0_fail(failure):
failure.trap(OperationFailure)
return self._database.system.indexes.find({"ns": str(self)}, as_class=SON,
_deadline=_deadline)
def on_ok(raw):
info = {}
for idx in raw:
info[idx["name"]] = idx
return info
return self.__index_information_3_0().addErrback(on_3_0_fail).addCallback(on_ok)
@timeout
def rename(self, new_name, _deadline=None):
"""rename(new_name)"""
to = "%s.%s" % (str(self._database), new_name)
return self._database("admin").command("renameCollection", str(self), to=to,
_deadline=_deadline)
@timeout
def distinct(self, key, filter=None, _deadline=None, **kwargs):
"""distinct(key, filter=None)"""
params = {"key": key}
filter = kwargs.pop("spec", filter)
if filter:
params["query"] = filter
return self._database.command("distinct", self._collection_name, _deadline=_deadline,
**params).addCallback(lambda result: result.get("values"))
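    # Sketch: distinct() resolves to the server's "values" array (the key and
    # filter below are assumptions):
    #
    #     collection.distinct("country", filter={"active": True})\
    #         .addCallback(lambda values: print(sorted(values)))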
@timeout
def aggregate(self, pipeline, full_response=False, initial_batch_size=None, _deadline=None):
"""aggregate(pipeline, full_response=False)"""
def on_ok(raw, data=None):
if data is None:
data = []
if "firstBatch" in raw["cursor"]:
batch = raw["cursor"]["firstBatch"]
else:
batch = raw["cursor"].get("nextBatch", [])
data += batch
if raw["cursor"]["id"] == 0:
if full_response:
raw["result"] = data
return raw
return data
next_reply = self._database.command(
"getMore", collection=self._collection_name,
getMore=raw["cursor"]["id"]
)
return next_reply.addCallback(on_ok, data)
if initial_batch_size is None:
cursor = {}
else:
cursor = {"batchSize": initial_batch_size}
return self._database.command(
"aggregate", self._collection_name, pipeline=pipeline,
_deadline=_deadline, cursor=cursor
).addCallback(on_ok)
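    # Sketch of aggregate(): on_ok() above keeps issuing getMore until the
    # cursor id is 0, so the caller receives the complete result list
    # (the pipeline is an illustrative assumption):
    #
    #     pipeline = [{"$match": {"active": True}},
    #                 {"$group": {"_id": "$country", "n": {"$sum": 1}}}]
    #     collection.aggregate(pipeline).addCallback(print)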
@timeout
def map_reduce(self, map, reduce, full_response=False, **kwargs):
params = {"map": map, "reduce": reduce}
params.update(**kwargs)
def on_ok(raw):
if full_response:
return raw
return raw.get("results")
return self._database.command("mapreduce", self._collection_name, **params)\
.addCallback(on_ok)
@timeout
def find_and_modify(self, query=None, update=None, upsert=False, **kwargs):
no_obj_error = "No matching object found"
if not update and not kwargs.get("remove", None):
raise ValueError("TxMongo: must either update or remove.")
if update and kwargs.get("remove", None):
raise ValueError("TxMongo: can't do both update and remove.")
params = kwargs
# No need to include empty args
if query:
params["query"] = query
if update:
params["update"] = update
if upsert:
params["upsert"] = upsert
def on_ok(result):
if not result["ok"]:
if result["errmsg"] == no_obj_error:
return None
else:
# Should never get here because of allowable_errors
raise ValueError("TxMongo: unexpected error '{0}'".format(result))
return result.get("value")
return self._database.command("findAndModify", self._collection_name,
allowable_errors=[no_obj_error],
**params).addCallback(on_ok)
    # A separate findAndModify helper is needed because the traditional
    # find_and_modify() accepts the `sort` kwarg as a dict and passes it to
    # the MongoDB command without conversion, whereas the find_one_and_*
    # methods accept `filter.sort` instances.
def _new_find_and_modify(self, filter, projection, sort, upsert=None,
return_document=ReturnDocument.BEFORE, _deadline=None,
**kwargs):
validate_is_mapping("filter", filter)
if not isinstance(return_document, bool):
raise ValueError("TxMongo: return_document must be ReturnDocument.BEFORE "
"or ReturnDocument.AFTER")
cmd = SON([("findAndModify", self._collection_name),
("query", filter),
("new", return_document)])
cmd.update(kwargs)
if projection is not None:
cmd["fields"] = self._normalize_fields_projection(projection)
if sort is not None:
cmd["sort"] = SON(sort["orderby"])
if upsert is not None:
validate_boolean("upsert", upsert)
cmd["upsert"] = upsert
no_obj_error = "No matching object found"
return self._database.command(cmd, allowable_errors=[no_obj_error], _deadline=_deadline)\
.addCallback(lambda result: result.get("value"))
@timeout
def find_one_and_delete(self, filter, projection=None, sort=None, _deadline=None):
"""find_one_and_delete(filter, projection=None, sort=None, **kwargs)"""
return self._new_find_and_modify(filter, projection, sort, remove=True,
_deadline=_deadline)
@timeout
def find_one_and_replace(self, filter, replacement, projection=None, sort=None,
upsert=False, return_document=ReturnDocument.BEFORE,
_deadline=None):
"""find_one_and_replace(filter, replacement, projection=None, sort=None, upsert=False, return_document=ReturnDocument.BEFORE)"""
validate_ok_for_replace(replacement)
return self._new_find_and_modify(filter, projection, sort, upsert, return_document,
update=replacement, _deadline=_deadline)
@timeout
def find_one_and_update(self, filter, update, projection=None, sort=None,
upsert=False, return_document=ReturnDocument.BEFORE, _deadline=None):
"""find_one_and_update(filter, update, projection=None, sort=None, upsert=False, return_document=ReturnDocument.BEFORE)"""
validate_ok_for_update(update)
return self._new_find_and_modify(filter, projection, sort, upsert, return_document,
update=update, _deadline=_deadline)
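    # Sketch for find_one_and_update(); ReturnDocument.AFTER yields the
    # post-update document (identifiers are assumptions):
    #
    #     collection.find_one_and_update(
    #         {"_id": job_id},
    #         {"$set": {"state": "running"}},
    #         return_document=ReturnDocument.AFTER,
    #     ).addCallback(print)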
def bulk_write(self, requests, ordered=True):
if not isinstance(requests, collections.Iterable):
raise TypeError("requests must be iterable")
requests = list(requests)
blk = _Bulk(self, ordered, bypass_document_validation=False)
for request in requests:
try:
request._add_to_bulk(blk)
except AttributeError:
                raise TypeError("{} is not a valid request".format(request))
return self._execute_bulk(blk)
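    # Sketch: bulk_write() accepts pymongo operation wrappers, each of which
    # implements _add_to_bulk(); the classes below are assumed importable
    # from pymongo:
    #
    #     from pymongo import InsertOne, UpdateOne, DeleteMany
    #     requests = [InsertOne({"x": 1}),
    #                 UpdateOne({"x": 1}, {"$inc": {"x": 1}}),
    #                 DeleteMany({"x": {"$gt": 10}})]
    #     collection.bulk_write(requests, ordered=True)\
    #         .addCallback(lambda res: print(res.bulk_api_result))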
def _execute_bulk(self, bulk):
if not bulk.ops:
raise InvalidOperation("No operations to execute")
if bulk.executed:
raise InvalidOperation("Bulk operations can only be executed once")
bulk.executed = True
if bulk.ordered:
generator = bulk.gen_ordered()
else:
generator = bulk.gen_unordered()
full_result = {
"writeErrors": [],
"writeConcernErrors": [],
"nInserted": 0,
"nUpserted": 0,
"nMatched": 0,
"nModified": 0,
"nRemoved": 0,
"upserted": [],
}
        # iterate_func and on_cmd_result_func are just pointers to the corresponding
        # functions. Passing a function's own reference directly as a callback to a
        # deferred creates a circular reference between the function object and its
        # closure that can only be freed by the garbage collector.
def iterate(iterate_func, on_cmd_result_func):
try:
run = next(generator)
except StopIteration:
return defer.succeed(None)
return self._execute_batch_command(run.op_type, run.ops, bulk.ordered)\
.addCallback(on_cmd_result_func, run, iterate_func, on_cmd_result_func)
def on_cmd_result(result, run, iterate_func, on_cmd_result_func):
_merge_command(run, full_result, result)
if bulk.ordered and full_result["writeErrors"]:
return
return iterate_func(iterate_func, on_cmd_result_func)
def on_all_done(_):
if self.write_concern.acknowledged:
if full_result["writeErrors"] or full_result["writeConcernErrors"]:
if full_result["writeErrors"]:
full_result["writeErrors"].sort(key=lambda error: error["index"])
raise BulkWriteError(full_result)
return BulkWriteResult(full_result, self.write_concern.acknowledged)
return iterate(iterate, on_cmd_result).addCallback(on_all_done)
def _execute_batch_command(self, command_type, documents, ordered):
assert command_type in _OP_MAP
cmd_collname = str(self._database["$cmd"])
def on_proto(proto):
results = []
def accumulate_result(reply, idx_offset):
result = reply.documents[0].decode()
_check_command_response(result)
results.append((idx_offset, result))
return result
# There are four major cases with different behavior of insert_many:
# * Unack, Unordered: sending all batches and not handling responses at all
# so ignoring any errors
#
# * Ack, Unordered: sending all batches, accumulating all responses and
# returning aggregated response
#
# * Unack, Ordered: handling DB responses despite unacknowledged write_concern
# because we must stop on first error (not raising it though)
#
# * Ack, Ordered: stopping on first error and raising BulkWriteError
actual_write_concern = self.write_concern
if ordered and self.write_concern.acknowledged is False:
                actual_write_concern = WriteConcern(w=1)
batches = self._generate_batch_commands(self._collection_name, _COMMANDS[command_type],
_OP_MAP[command_type], documents, ordered,
actual_write_concern, proto.max_bson_size,
proto.max_write_batch_size)
all_responses = []
# for the meaning of iterate_func see the comment in _execute_bulk()
def iterate(iterate_func):
try:
idx_offset, batch = next(batches)
except StopIteration:
return defer.succeed(None)
batch_result = proto.send_QUERY(Query(collection=cmd_collname, query=batch))
if self.write_concern.acknowledged or ordered:
batch_result.addCallback(accumulate_result, idx_offset)
if ordered:
def on_batch_result(result):
if "writeErrors" in result:
return defer.succeed(None)
else:
return iterate_func(iterate_func)
return batch_result.addCallback(on_batch_result)
else:
all_responses.append(batch_result)
return iterate_func(iterate_func)
else:
return iterate_func(iterate_func)
def done(_):
def on_fail(failure):
failure.trap(defer.FirstError)
failure.value.subFailure.raiseException()
if self.write_concern.acknowledged and not ordered:
return defer.gatherResults(all_responses, consumeErrors=True)\
.addErrback(on_fail)
return iterate(iterate).addCallback(done).addCallback(lambda _: results)
return self._database.connection.getprotocol().addCallback(on_proto)
| apache-2.0 |
razvanphp/arangodb | 3rdParty/V8-3.31.74.1/third_party/python_26/Lib/sched.py | 59 | 5093 | """A generally useful event scheduler class.
Each instance of this class manages its own queue.
No multi-threading is implied; you are supposed to hack that
yourself, or use a single instance per application.
Each instance is parametrized with two functions, one that is
supposed to return the current time, one that is supposed to
implement a delay. You can implement real-time scheduling by
substituting time and sleep from built-in module time, or you can
implement simulated time by writing your own functions. This can
also be used to integrate scheduling with STDWIN events; the delay
function is allowed to modify the queue. Time can be expressed as
integers or floating point numbers, as long as it is consistent.
Events are specified by tuples (time, priority, action, argument).
As in UNIX, lower priority numbers mean higher priority; in this
way the queue can be maintained as a priority queue. Execution of the
event means calling the action function, passing it the argument
sequence in "argument" (remember that in Python, multiple function
arguments are packed in a sequence).
The action function may be an instance method so it
has another way to reference private data (besides global variables).
"""
# XXX The timefunc and delayfunc should have been defined as methods
# XXX so you can define new kinds of schedulers using subclassing
# XXX instead of having to define a module or class just to hold
# XXX the global state of your particular time and delay functions.
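# A short usage sketch (not part of the original module), using real time via
# the time module's time() and sleep() functions:
#
#     import sched, time
#     s = sched.scheduler(time.time, time.sleep)
#     def report(name):
#         print name, time.time()
#     s.enter(1, 1, report, ("first",))
#     s.enter(2, 1, report, ("second",))
#     s.run()  # "first" fires after ~1s, "second" after ~2s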
import heapq
from collections import namedtuple
__all__ = ["scheduler"]
Event = namedtuple('Event', 'time, priority, action, argument')
class scheduler:
def __init__(self, timefunc, delayfunc):
"""Initialize a new instance, passing the time and delay
functions"""
self._queue = []
self.timefunc = timefunc
self.delayfunc = delayfunc
def enterabs(self, time, priority, action, argument):
"""Enter a new event in the queue at an absolute time.
Returns an ID for the event which can be used to remove it,
if necessary.
"""
event = Event(time, priority, action, argument)
heapq.heappush(self._queue, event)
return event # The ID
def enter(self, delay, priority, action, argument):
"""A variant that specifies the time as a relative time.
This is actually the more commonly used interface.
"""
time = self.timefunc() + delay
return self.enterabs(time, priority, action, argument)
def cancel(self, event):
"""Remove an event from the queue.
This must be presented the ID as returned by enter().
If the event is not in the queue, this raises RuntimeError.
"""
self._queue.remove(event)
heapq.heapify(self._queue)
def empty(self):
"""Check whether the queue is empty."""
return not self._queue
def run(self):
"""Execute events until the queue is empty.
When there is a positive delay until the first event, the
delay function is called and the event is left in the queue;
otherwise, the event is removed from the queue and executed
(its action function is called, passing it the argument). If
the delay function returns prematurely, it is simply
restarted.
It is legal for both the delay function and the action
        function to modify the queue or to raise an exception;
exceptions are not caught but the scheduler's state remains
well-defined so run() may be called again.
A questionable hack is added to allow other threads to run:
just after an event is executed, a delay of 0 is executed, to
avoid monopolizing the CPU when other threads are also
runnable.
"""
# localize variable access to minimize overhead
# and to improve thread safety
q = self._queue
delayfunc = self.delayfunc
timefunc = self.timefunc
pop = heapq.heappop
while q:
time, priority, action, argument = checked_event = q[0]
now = timefunc()
if now < time:
delayfunc(time - now)
else:
event = pop(q)
# Verify that the event was not removed or altered
# by another thread after we last looked at q[0].
if event is checked_event:
action(*argument)
delayfunc(0) # Let other threads run
else:
heapq.heappush(q, event)
@property
def queue(self):
"""An ordered list of upcoming events.
Events are named tuples with fields for:
time, priority, action, arguments
"""
# Use heapq to sort the queue rather than using 'sorted(self._queue)'.
# With heapq, two events scheduled at the same time will show in
# the actual order they would be retrieved.
events = self._queue[:]
return map(heapq.heappop, [events]*len(events))
| apache-2.0 |
mottosso/mindbender-setup | bin/windows/python36/Lib/unittest/test/test_discovery.py | 6 | 33037 | import os.path
from os.path import abspath
import re
import sys
import types
import pickle
import builtins
from test import support
import unittest
import unittest.test
class TestableTestProgram(unittest.TestProgram):
module = None
exit = True
defaultTest = failfast = catchbreak = buffer = None
verbosity = 1
progName = ''
testRunner = testLoader = None
def __init__(self):
pass
class TestDiscovery(unittest.TestCase):
# Heavily mocked tests so I can avoid hitting the filesystem
def test_get_name_from_path(self):
loader = unittest.TestLoader()
loader._top_level_dir = '/foo'
name = loader._get_name_from_path('/foo/bar/baz.py')
self.assertEqual(name, 'bar.baz')
if not __debug__:
# asserts are off
return
with self.assertRaises(AssertionError):
loader._get_name_from_path('/bar/baz.py')
def test_find_tests(self):
loader = unittest.TestLoader()
original_listdir = os.listdir
def restore_listdir():
os.listdir = original_listdir
original_isfile = os.path.isfile
def restore_isfile():
os.path.isfile = original_isfile
original_isdir = os.path.isdir
def restore_isdir():
os.path.isdir = original_isdir
path_lists = [['test2.py', 'test1.py', 'not_a_test.py', 'test_dir',
'test.foo', 'test-not-a-module.py', 'another_dir'],
['test4.py', 'test3.py', ]]
os.listdir = lambda path: path_lists.pop(0)
self.addCleanup(restore_listdir)
def isdir(path):
return path.endswith('dir')
os.path.isdir = isdir
self.addCleanup(restore_isdir)
def isfile(path):
# another_dir is not a package and so shouldn't be recursed into
return not path.endswith('dir') and not 'another_dir' in path
os.path.isfile = isfile
self.addCleanup(restore_isfile)
loader._get_module_from_name = lambda path: path + ' module'
orig_load_tests = loader.loadTestsFromModule
def loadTestsFromModule(module, pattern=None):
# This is where load_tests is called.
base = orig_load_tests(module, pattern=pattern)
return base + [module + ' tests']
loader.loadTestsFromModule = loadTestsFromModule
loader.suiteClass = lambda thing: thing
top_level = os.path.abspath('/foo')
loader._top_level_dir = top_level
suite = list(loader._find_tests(top_level, 'test*.py'))
# The test suites found should be sorted alphabetically for reliable
# execution order.
expected = [[name + ' module tests'] for name in
('test1', 'test2', 'test_dir')]
expected.extend([[('test_dir.%s' % name) + ' module tests'] for name in
('test3', 'test4')])
self.assertEqual(suite, expected)
def test_find_tests_socket(self):
# A socket is neither a directory nor a regular file.
# https://bugs.python.org/issue25320
loader = unittest.TestLoader()
original_listdir = os.listdir
def restore_listdir():
os.listdir = original_listdir
original_isfile = os.path.isfile
def restore_isfile():
os.path.isfile = original_isfile
original_isdir = os.path.isdir
def restore_isdir():
os.path.isdir = original_isdir
path_lists = [['socket']]
os.listdir = lambda path: path_lists.pop(0)
self.addCleanup(restore_listdir)
os.path.isdir = lambda path: False
self.addCleanup(restore_isdir)
os.path.isfile = lambda path: False
self.addCleanup(restore_isfile)
loader._get_module_from_name = lambda path: path + ' module'
orig_load_tests = loader.loadTestsFromModule
def loadTestsFromModule(module, pattern=None):
# This is where load_tests is called.
base = orig_load_tests(module, pattern=pattern)
return base + [module + ' tests']
loader.loadTestsFromModule = loadTestsFromModule
loader.suiteClass = lambda thing: thing
top_level = os.path.abspath('/foo')
loader._top_level_dir = top_level
suite = list(loader._find_tests(top_level, 'test*.py'))
self.assertEqual(suite, [])
def test_find_tests_with_package(self):
loader = unittest.TestLoader()
original_listdir = os.listdir
def restore_listdir():
os.listdir = original_listdir
original_isfile = os.path.isfile
def restore_isfile():
os.path.isfile = original_isfile
original_isdir = os.path.isdir
def restore_isdir():
os.path.isdir = original_isdir
directories = ['a_directory', 'test_directory', 'test_directory2']
path_lists = [directories, [], [], []]
os.listdir = lambda path: path_lists.pop(0)
self.addCleanup(restore_listdir)
os.path.isdir = lambda path: True
self.addCleanup(restore_isdir)
os.path.isfile = lambda path: os.path.basename(path) not in directories
self.addCleanup(restore_isfile)
class Module(object):
paths = []
load_tests_args = []
def __init__(self, path):
self.path = path
self.paths.append(path)
if os.path.basename(path) == 'test_directory':
def load_tests(loader, tests, pattern):
self.load_tests_args.append((loader, tests, pattern))
return [self.path + ' load_tests']
self.load_tests = load_tests
def __eq__(self, other):
return self.path == other.path
loader._get_module_from_name = lambda name: Module(name)
orig_load_tests = loader.loadTestsFromModule
def loadTestsFromModule(module, pattern=None):
# This is where load_tests is called.
base = orig_load_tests(module, pattern=pattern)
return base + [module.path + ' module tests']
loader.loadTestsFromModule = loadTestsFromModule
loader.suiteClass = lambda thing: thing
loader._top_level_dir = '/foo'
# this time no '.py' on the pattern so that it can match
# a test package
suite = list(loader._find_tests('/foo', 'test*'))
# We should have loaded tests from the a_directory and test_directory2
# directly and via load_tests for the test_directory package, which
# still calls the baseline module loader.
self.assertEqual(suite,
[['a_directory module tests'],
['test_directory load_tests',
'test_directory module tests'],
['test_directory2 module tests']])
# The test module paths should be sorted for reliable execution order
self.assertEqual(Module.paths,
['a_directory', 'test_directory', 'test_directory2'])
# load_tests should have been called once with loader, tests and pattern
        # (but there are no tests in our stub module itself, so that's [] at
        # the time of the call).
self.assertEqual(Module.load_tests_args,
[(loader, [], 'test*')])
def test_find_tests_default_calls_package_load_tests(self):
loader = unittest.TestLoader()
original_listdir = os.listdir
def restore_listdir():
os.listdir = original_listdir
original_isfile = os.path.isfile
def restore_isfile():
os.path.isfile = original_isfile
original_isdir = os.path.isdir
def restore_isdir():
os.path.isdir = original_isdir
directories = ['a_directory', 'test_directory', 'test_directory2']
path_lists = [directories, [], [], []]
os.listdir = lambda path: path_lists.pop(0)
self.addCleanup(restore_listdir)
os.path.isdir = lambda path: True
self.addCleanup(restore_isdir)
os.path.isfile = lambda path: os.path.basename(path) not in directories
self.addCleanup(restore_isfile)
class Module(object):
paths = []
load_tests_args = []
def __init__(self, path):
self.path = path
self.paths.append(path)
if os.path.basename(path) == 'test_directory':
def load_tests(loader, tests, pattern):
self.load_tests_args.append((loader, tests, pattern))
return [self.path + ' load_tests']
self.load_tests = load_tests
def __eq__(self, other):
return self.path == other.path
loader._get_module_from_name = lambda name: Module(name)
orig_load_tests = loader.loadTestsFromModule
def loadTestsFromModule(module, pattern=None):
# This is where load_tests is called.
base = orig_load_tests(module, pattern=pattern)
return base + [module.path + ' module tests']
loader.loadTestsFromModule = loadTestsFromModule
loader.suiteClass = lambda thing: thing
loader._top_level_dir = '/foo'
        # the default 'test*.py' pattern is used here; the test package is
        # still discovered and its load_tests hook called
suite = list(loader._find_tests('/foo', 'test*.py'))
# We should have loaded tests from the a_directory and test_directory2
# directly and via load_tests for the test_directory package, which
# still calls the baseline module loader.
self.assertEqual(suite,
[['a_directory module tests'],
['test_directory load_tests',
'test_directory module tests'],
['test_directory2 module tests']])
# The test module paths should be sorted for reliable execution order
self.assertEqual(Module.paths,
['a_directory', 'test_directory', 'test_directory2'])
# load_tests should have been called once with loader, tests and pattern
self.assertEqual(Module.load_tests_args,
[(loader, [], 'test*.py')])
def test_find_tests_customize_via_package_pattern(self):
# This test uses the example 'do-nothing' load_tests from
# https://docs.python.org/3/library/unittest.html#load-tests-protocol
# to make sure that that actually works.
# Housekeeping
original_listdir = os.listdir
def restore_listdir():
os.listdir = original_listdir
self.addCleanup(restore_listdir)
original_isfile = os.path.isfile
def restore_isfile():
os.path.isfile = original_isfile
self.addCleanup(restore_isfile)
original_isdir = os.path.isdir
def restore_isdir():
os.path.isdir = original_isdir
self.addCleanup(restore_isdir)
self.addCleanup(sys.path.remove, abspath('/foo'))
# Test data: we expect the following:
# a listdir to find our package, and isfile and isdir checks on it.
# a module-from-name call to turn that into a module
# followed by load_tests.
# then our load_tests will call discover() which is messy
# but that finally chains into find_tests again for the child dir -
# which is why we don't have an infinite loop.
# We expect to see:
# the module load tests for both package and plain module called,
# and the plain module result nested by the package module load_tests
# indicating that it was processed and could have been mutated.
vfs = {abspath('/foo'): ['my_package'],
abspath('/foo/my_package'): ['__init__.py', 'test_module.py']}
def list_dir(path):
return list(vfs[path])
os.listdir = list_dir
os.path.isdir = lambda path: not path.endswith('.py')
os.path.isfile = lambda path: path.endswith('.py')
class Module(object):
paths = []
load_tests_args = []
def __init__(self, path):
self.path = path
self.paths.append(path)
if path.endswith('test_module'):
def load_tests(loader, tests, pattern):
self.load_tests_args.append((loader, tests, pattern))
return [self.path + ' load_tests']
else:
def load_tests(loader, tests, pattern):
self.load_tests_args.append((loader, tests, pattern))
# top level directory cached on loader instance
__file__ = '/foo/my_package/__init__.py'
this_dir = os.path.dirname(__file__)
pkg_tests = loader.discover(
start_dir=this_dir, pattern=pattern)
return [self.path + ' load_tests', tests
] + pkg_tests
self.load_tests = load_tests
def __eq__(self, other):
return self.path == other.path
loader = unittest.TestLoader()
loader._get_module_from_name = lambda name: Module(name)
loader.suiteClass = lambda thing: thing
loader._top_level_dir = abspath('/foo')
# this time no '.py' on the pattern so that it can match
# a test package
suite = list(loader._find_tests(abspath('/foo'), 'test*.py'))
# We should have loaded tests from both my_package and
# my_package.test_module, and also run the load_tests hook in both.
# (normally this would be nested TestSuites.)
self.assertEqual(suite,
[['my_package load_tests', [],
['my_package.test_module load_tests']]])
# Parents before children.
self.assertEqual(Module.paths,
['my_package', 'my_package.test_module'])
# load_tests should have been called twice with loader, tests and pattern
self.assertEqual(Module.load_tests_args,
[(loader, [], 'test*.py'),
(loader, [], 'test*.py')])
def test_discover(self):
loader = unittest.TestLoader()
original_isfile = os.path.isfile
original_isdir = os.path.isdir
def restore_isfile():
os.path.isfile = original_isfile
os.path.isfile = lambda path: False
self.addCleanup(restore_isfile)
orig_sys_path = sys.path[:]
def restore_path():
sys.path[:] = orig_sys_path
self.addCleanup(restore_path)
full_path = os.path.abspath(os.path.normpath('/foo'))
with self.assertRaises(ImportError):
loader.discover('/foo/bar', top_level_dir='/foo')
self.assertEqual(loader._top_level_dir, full_path)
self.assertIn(full_path, sys.path)
os.path.isfile = lambda path: True
os.path.isdir = lambda path: True
def restore_isdir():
os.path.isdir = original_isdir
self.addCleanup(restore_isdir)
_find_tests_args = []
def _find_tests(start_dir, pattern, namespace=None):
_find_tests_args.append((start_dir, pattern))
return ['tests']
loader._find_tests = _find_tests
loader.suiteClass = str
suite = loader.discover('/foo/bar/baz', 'pattern', '/foo/bar')
top_level_dir = os.path.abspath('/foo/bar')
start_dir = os.path.abspath('/foo/bar/baz')
self.assertEqual(suite, "['tests']")
self.assertEqual(loader._top_level_dir, top_level_dir)
self.assertEqual(_find_tests_args, [(start_dir, 'pattern')])
self.assertIn(top_level_dir, sys.path)
def test_discover_start_dir_is_package_calls_package_load_tests(self):
# This test verifies that the package load_tests in a package is indeed
# invoked when the start_dir is a package (and not the top level).
# http://bugs.python.org/issue22457
# Test data: we expect the following:
# an isfile to verify the package, then importing and scanning
# as per _find_tests' normal behaviour.
# We expect to see our load_tests hook called once.
vfs = {abspath('/toplevel'): ['startdir'],
abspath('/toplevel/startdir'): ['__init__.py']}
def list_dir(path):
return list(vfs[path])
self.addCleanup(setattr, os, 'listdir', os.listdir)
os.listdir = list_dir
self.addCleanup(setattr, os.path, 'isfile', os.path.isfile)
os.path.isfile = lambda path: path.endswith('.py')
self.addCleanup(setattr, os.path, 'isdir', os.path.isdir)
os.path.isdir = lambda path: not path.endswith('.py')
self.addCleanup(sys.path.remove, abspath('/toplevel'))
class Module(object):
paths = []
load_tests_args = []
def __init__(self, path):
self.path = path
def load_tests(self, loader, tests, pattern):
return ['load_tests called ' + self.path]
def __eq__(self, other):
return self.path == other.path
loader = unittest.TestLoader()
loader._get_module_from_name = lambda name: Module(name)
loader.suiteClass = lambda thing: thing
suite = loader.discover('/toplevel/startdir', top_level_dir='/toplevel')
# We should have loaded tests from the package __init__.
# (normally this would be nested TestSuites.)
self.assertEqual(suite,
[['load_tests called startdir']])
def setup_import_issue_tests(self, fakefile):
listdir = os.listdir
os.listdir = lambda _: [fakefile]
isfile = os.path.isfile
os.path.isfile = lambda _: True
orig_sys_path = sys.path[:]
def restore():
os.path.isfile = isfile
os.listdir = listdir
sys.path[:] = orig_sys_path
self.addCleanup(restore)
def setup_import_issue_package_tests(self, vfs):
self.addCleanup(setattr, os, 'listdir', os.listdir)
self.addCleanup(setattr, os.path, 'isfile', os.path.isfile)
self.addCleanup(setattr, os.path, 'isdir', os.path.isdir)
self.addCleanup(sys.path.__setitem__, slice(None), list(sys.path))
def list_dir(path):
return list(vfs[path])
os.listdir = list_dir
os.path.isdir = lambda path: not path.endswith('.py')
os.path.isfile = lambda path: path.endswith('.py')
def test_discover_with_modules_that_fail_to_import(self):
loader = unittest.TestLoader()
self.setup_import_issue_tests('test_this_does_not_exist.py')
suite = loader.discover('.')
self.assertIn(os.getcwd(), sys.path)
self.assertEqual(suite.countTestCases(), 1)
# Errors loading the suite are also captured for introspection.
self.assertNotEqual([], loader.errors)
self.assertEqual(1, len(loader.errors))
error = loader.errors[0]
self.assertTrue(
'Failed to import test module: test_this_does_not_exist' in error,
'missing error string in %r' % error)
test = list(list(suite)[0])[0] # extract test from suite
with self.assertRaises(ImportError):
test.test_this_does_not_exist()
def test_discover_with_init_modules_that_fail_to_import(self):
vfs = {abspath('/foo'): ['my_package'],
abspath('/foo/my_package'): ['__init__.py', 'test_module.py']}
self.setup_import_issue_package_tests(vfs)
import_calls = []
def _get_module_from_name(name):
import_calls.append(name)
raise ImportError("Cannot import Name")
loader = unittest.TestLoader()
loader._get_module_from_name = _get_module_from_name
suite = loader.discover(abspath('/foo'))
self.assertIn(abspath('/foo'), sys.path)
self.assertEqual(suite.countTestCases(), 1)
# Errors loading the suite are also captured for introspection.
self.assertNotEqual([], loader.errors)
self.assertEqual(1, len(loader.errors))
error = loader.errors[0]
self.assertTrue(
'Failed to import test module: my_package' in error,
'missing error string in %r' % error)
test = list(list(suite)[0])[0] # extract test from suite
with self.assertRaises(ImportError):
test.my_package()
self.assertEqual(import_calls, ['my_package'])
# Check picklability
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
pickle.loads(pickle.dumps(test, proto))
def test_discover_with_module_that_raises_SkipTest_on_import(self):
loader = unittest.TestLoader()
def _get_module_from_name(name):
raise unittest.SkipTest('skipperoo')
loader._get_module_from_name = _get_module_from_name
self.setup_import_issue_tests('test_skip_dummy.py')
suite = loader.discover('.')
self.assertEqual(suite.countTestCases(), 1)
result = unittest.TestResult()
suite.run(result)
self.assertEqual(len(result.skipped), 1)
# Check picklability
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
pickle.loads(pickle.dumps(suite, proto))
def test_discover_with_init_module_that_raises_SkipTest_on_import(self):
vfs = {abspath('/foo'): ['my_package'],
abspath('/foo/my_package'): ['__init__.py', 'test_module.py']}
self.setup_import_issue_package_tests(vfs)
import_calls = []
def _get_module_from_name(name):
import_calls.append(name)
raise unittest.SkipTest('skipperoo')
loader = unittest.TestLoader()
loader._get_module_from_name = _get_module_from_name
suite = loader.discover(abspath('/foo'))
self.assertIn(abspath('/foo'), sys.path)
self.assertEqual(suite.countTestCases(), 1)
result = unittest.TestResult()
suite.run(result)
self.assertEqual(len(result.skipped), 1)
self.assertEqual(result.testsRun, 1)
self.assertEqual(import_calls, ['my_package'])
# Check picklability
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
pickle.loads(pickle.dumps(suite, proto))
def test_command_line_handling_parseArgs(self):
program = TestableTestProgram()
args = []
program._do_discovery = args.append
program.parseArgs(['something', 'discover'])
self.assertEqual(args, [[]])
args[:] = []
program.parseArgs(['something', 'discover', 'foo', 'bar'])
self.assertEqual(args, [['foo', 'bar']])
def test_command_line_handling_discover_by_default(self):
program = TestableTestProgram()
args = []
program._do_discovery = args.append
program.parseArgs(['something'])
self.assertEqual(args, [[]])
self.assertEqual(program.verbosity, 1)
self.assertIs(program.buffer, False)
self.assertIs(program.catchbreak, False)
self.assertIs(program.failfast, False)
def test_command_line_handling_discover_by_default_with_options(self):
program = TestableTestProgram()
args = []
program._do_discovery = args.append
program.parseArgs(['something', '-v', '-b', '-v', '-c', '-f'])
self.assertEqual(args, [[]])
self.assertEqual(program.verbosity, 2)
self.assertIs(program.buffer, True)
self.assertIs(program.catchbreak, True)
self.assertIs(program.failfast, True)
def test_command_line_handling_do_discovery_too_many_arguments(self):
program = TestableTestProgram()
program.testLoader = None
with support.captured_stderr() as stderr, \
self.assertRaises(SystemExit) as cm:
# too many args
program._do_discovery(['one', 'two', 'three', 'four'])
self.assertEqual(cm.exception.args, (2,))
self.assertIn('usage:', stderr.getvalue())
def test_command_line_handling_do_discovery_uses_default_loader(self):
program = object.__new__(unittest.TestProgram)
program._initArgParsers()
class Loader(object):
args = []
def discover(self, start_dir, pattern, top_level_dir):
self.args.append((start_dir, pattern, top_level_dir))
return 'tests'
program.testLoader = Loader()
program._do_discovery(['-v'])
self.assertEqual(Loader.args, [('.', 'test*.py', None)])
def test_command_line_handling_do_discovery_calls_loader(self):
program = TestableTestProgram()
class Loader(object):
args = []
def discover(self, start_dir, pattern, top_level_dir):
self.args.append((start_dir, pattern, top_level_dir))
return 'tests'
program._do_discovery(['-v'], Loader=Loader)
self.assertEqual(program.verbosity, 2)
self.assertEqual(program.test, 'tests')
self.assertEqual(Loader.args, [('.', 'test*.py', None)])
Loader.args = []
program = TestableTestProgram()
program._do_discovery(['--verbose'], Loader=Loader)
self.assertEqual(program.test, 'tests')
self.assertEqual(Loader.args, [('.', 'test*.py', None)])
Loader.args = []
program = TestableTestProgram()
program._do_discovery([], Loader=Loader)
self.assertEqual(program.test, 'tests')
self.assertEqual(Loader.args, [('.', 'test*.py', None)])
Loader.args = []
program = TestableTestProgram()
program._do_discovery(['fish'], Loader=Loader)
self.assertEqual(program.test, 'tests')
self.assertEqual(Loader.args, [('fish', 'test*.py', None)])
Loader.args = []
program = TestableTestProgram()
program._do_discovery(['fish', 'eggs'], Loader=Loader)
self.assertEqual(program.test, 'tests')
self.assertEqual(Loader.args, [('fish', 'eggs', None)])
Loader.args = []
program = TestableTestProgram()
program._do_discovery(['fish', 'eggs', 'ham'], Loader=Loader)
self.assertEqual(program.test, 'tests')
self.assertEqual(Loader.args, [('fish', 'eggs', 'ham')])
Loader.args = []
program = TestableTestProgram()
program._do_discovery(['-s', 'fish'], Loader=Loader)
self.assertEqual(program.test, 'tests')
self.assertEqual(Loader.args, [('fish', 'test*.py', None)])
Loader.args = []
program = TestableTestProgram()
program._do_discovery(['-t', 'fish'], Loader=Loader)
self.assertEqual(program.test, 'tests')
self.assertEqual(Loader.args, [('.', 'test*.py', 'fish')])
Loader.args = []
program = TestableTestProgram()
program._do_discovery(['-p', 'fish'], Loader=Loader)
self.assertEqual(program.test, 'tests')
self.assertEqual(Loader.args, [('.', 'fish', None)])
self.assertFalse(program.failfast)
self.assertFalse(program.catchbreak)
Loader.args = []
program = TestableTestProgram()
program._do_discovery(['-p', 'eggs', '-s', 'fish', '-v', '-f', '-c'],
Loader=Loader)
self.assertEqual(program.test, 'tests')
self.assertEqual(Loader.args, [('fish', 'eggs', None)])
self.assertEqual(program.verbosity, 2)
self.assertTrue(program.failfast)
self.assertTrue(program.catchbreak)
def setup_module_clash(self):
class Module(object):
__file__ = 'bar/foo.py'
sys.modules['foo'] = Module
full_path = os.path.abspath('foo')
original_listdir = os.listdir
original_isfile = os.path.isfile
original_isdir = os.path.isdir
def cleanup():
os.listdir = original_listdir
os.path.isfile = original_isfile
os.path.isdir = original_isdir
del sys.modules['foo']
if full_path in sys.path:
sys.path.remove(full_path)
self.addCleanup(cleanup)
def listdir(_):
return ['foo.py']
def isfile(_):
return True
def isdir(_):
return True
os.listdir = listdir
os.path.isfile = isfile
os.path.isdir = isdir
return full_path
def test_detect_module_clash(self):
full_path = self.setup_module_clash()
loader = unittest.TestLoader()
mod_dir = os.path.abspath('bar')
expected_dir = os.path.abspath('foo')
msg = re.escape(r"'foo' module incorrectly imported from %r. Expected %r. "
"Is this module globally installed?" % (mod_dir, expected_dir))
self.assertRaisesRegex(
ImportError, '^%s$' % msg, loader.discover,
start_dir='foo', pattern='foo.py'
)
self.assertEqual(sys.path[0], full_path)
def test_module_symlink_ok(self):
full_path = self.setup_module_clash()
original_realpath = os.path.realpath
mod_dir = os.path.abspath('bar')
expected_dir = os.path.abspath('foo')
def cleanup():
os.path.realpath = original_realpath
self.addCleanup(cleanup)
def realpath(path):
if path == os.path.join(mod_dir, 'foo.py'):
return os.path.join(expected_dir, 'foo.py')
return path
os.path.realpath = realpath
loader = unittest.TestLoader()
loader.discover(start_dir='foo', pattern='foo.py')
def test_discovery_from_dotted_path(self):
loader = unittest.TestLoader()
tests = [self]
expectedPath = os.path.abspath(os.path.dirname(unittest.test.__file__))
self.wasRun = False
def _find_tests(start_dir, pattern, namespace=None):
self.wasRun = True
self.assertEqual(start_dir, expectedPath)
return tests
loader._find_tests = _find_tests
suite = loader.discover('unittest.test')
self.assertTrue(self.wasRun)
self.assertEqual(suite._tests, tests)
def test_discovery_from_dotted_path_builtin_modules(self):
loader = unittest.TestLoader()
listdir = os.listdir
os.listdir = lambda _: ['test_this_does_not_exist.py']
isfile = os.path.isfile
isdir = os.path.isdir
os.path.isdir = lambda _: False
orig_sys_path = sys.path[:]
def restore():
os.path.isfile = isfile
os.path.isdir = isdir
os.listdir = listdir
sys.path[:] = orig_sys_path
self.addCleanup(restore)
with self.assertRaises(TypeError) as cm:
loader.discover('sys')
self.assertEqual(str(cm.exception),
'Can not use builtin modules '
'as dotted module names')
def test_discovery_from_dotted_namespace_packages(self):
loader = unittest.TestLoader()
orig_import = __import__
package = types.ModuleType('package')
package.__path__ = ['/a', '/b']
package.__spec__ = types.SimpleNamespace(
loader=None,
submodule_search_locations=['/a', '/b']
)
def _import(packagename, *args, **kwargs):
sys.modules[packagename] = package
return package
def cleanup():
builtins.__import__ = orig_import
self.addCleanup(cleanup)
builtins.__import__ = _import
_find_tests_args = []
def _find_tests(start_dir, pattern, namespace=None):
_find_tests_args.append((start_dir, pattern))
return ['%s/tests' % start_dir]
loader._find_tests = _find_tests
loader.suiteClass = list
suite = loader.discover('package')
self.assertEqual(suite, ['/a/tests', '/b/tests'])
def test_discovery_failed_discovery(self):
loader = unittest.TestLoader()
package = types.ModuleType('package')
orig_import = __import__
def _import(packagename, *args, **kwargs):
sys.modules[packagename] = package
return package
def cleanup():
builtins.__import__ = orig_import
self.addCleanup(cleanup)
builtins.__import__ = _import
with self.assertRaises(TypeError) as cm:
loader.discover('package')
self.assertEqual(str(cm.exception),
'don\'t know how to discover from {!r}'
.format(package))
if __name__ == '__main__':
unittest.main()
| mit |
freevo/kaa-candy | src/widgets/text.py | 1 | 5250 | # -*- coding: iso-8859-1 -*-
# -----------------------------------------------------------------------------
# text.py - multiline text widget
# -----------------------------------------------------------------------------
# kaa-candy - Fourth generation Canvas System using Clutter as backend
# Copyright (C) 2011 Dirk Meyer
#
# First Version: Dirk Meyer <https://github.com/Dischi>
# Maintainer: Dirk Meyer <https://github.com/Dischi>
#
# Based on various previous attempts to create a canvas system for
# Freevo by Dirk Meyer and Jason Tackaberry. Please see the file
# AUTHORS for a complete list of authors.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MER-
# CHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# -----------------------------------------------------------------------------
__all__ = [ 'Text' ]
import re
import kaa.base
import cairo
from gi.repository import Pango, PangoCairo
from widget import Widget
from ..core import Color, Font, create_cairo_context
class Text(Widget):
candyxml_name = 'text'
candy_backend = 'candy.Text'
attributes = [ 'color', 'font', 'text', 'align' ]
attribute_types = {
'color': Color,
'font': Font
}
__intrinsic_size_param = None
__intrinsic_size_cache = None
__text_regexp = re.compile('\$([a-zA-Z][a-zA-Z0-9_\.]*)|\${([^}]*)}')
__text = None
def __init__(self, pos, size, text, font, color, align=None, condition=None, context=None):
"""
        Create a Text widget. Unlike a Label, a Text widget supports multi-line
text and markup. See the pango markup documentation.
@param pos: (x,y) position of the widget or None
@param size: (width,height) geometry of the widget
        @param text: text to show
        @param font: kaa.candy.Font used to render the text
        @param color: kaa.candy.Color to fill the text
@param align: align value
@param context: the context the widget is created in
"""
super(Text, self).__init__(pos, size, context)
self._condition = condition
self.align = align or Widget.ALIGN_LEFT
self.font = font
self.text = text
self.color = color
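    # Hedged construction sketch (argument values are assumptions; widgets
    # are normally created from candyxml, see candyxml_parse() below):
    #
    #     label = Text(pos=(10, 10), size=(400, 100), text='Hello $name',
    #                  font=Font('Vera:24'), color=Color('#ffffff'),
    #                  align=Text.ALIGN_LEFT, context={'name': 'world'})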
@property
def text(self):
return self.__text
@text.setter
def text(self, text):
self.__text_provided = text
def replace_context(matchobj):
match = matchobj.groups()[0] or matchobj.groups()[1]
s = self.context.get(match, '')
if s is not None:
return kaa.base.py3_str(s, coerce=True)
return ''
if self.context:
# we have a context, use it
if self._condition and not self.context.get(self._condition):
text = ' ' # why does '' not work on update?
text = re.sub(self.__text_regexp, replace_context, text)
if self.__text == text:
return
self.__text = text
self.queue_rendering()
def sync_context(self):
"""
Adjust to a new context
"""
self.text = self.__text_provided
def sync_layout(self, size):
"""
Sync layout changes and calculate intrinsic size based on the
parent's size.
"""
super(Text, self).sync_layout(size)
width, height = self.size
if self.__intrinsic_size_param == (width, height, self.text, self.font.name, self.font.size):
self.intrinsic_size = self.__intrinsic_size_cache
return self.__intrinsic_size_cache
cr = create_cairo_context()
layout = PangoCairo.create_layout(cr)
layout.set_width(width * Pango.SCALE)
layout.set_height(height * Pango.SCALE)
layout.set_ellipsize(Pango.EllipsizeMode.END)
layout.set_wrap(Pango.WrapMode.WORD_CHAR)
layout.set_font_description(self.font.get_font_description())
layout.set_text(self.text, -1)
PangoCairo.show_layout(cr, layout)
self.__intrinsic_size_cache = \
int(min(width, Pango.units_to_double(layout.get_size()[0]))), \
int(min(height, Pango.units_to_double(layout.get_size()[1])))
self.__intrinsic_size_param = (width, height, self.text, self.font.name, self.font.size)
self.intrinsic_size = self.__intrinsic_size_cache
return self.__intrinsic_size_cache
@classmethod
def candyxml_parse(cls, element):
"""
Parse the XML element for parameter to create the widget.
"""
return super(Text, cls).candyxml_parse(element).update(
text=element.content, align=element.align, color=element.color,
font=element.font, condition=element.condition)
| gpl-2.0 |
sangood/spksrc | spk/gentoo-chroot/src/app/application/direct.py | 34 | 4465 | from pyextdirect.configuration import create_configuration, expose, LOAD, STORE_READ, STORE_CUD
from pyextdirect.router import Router
import os
import subprocess
from config import *
from db import *
__all__ = ['Base', 'Services', 'Overview']
Base = create_configuration()
class Services(Base):
def __init__(self):
self.session = Session()
@expose(kind=STORE_CUD)
def create(self, data):
results = []
for record in data:
service = Service(name=record['name'], launch_script=record['launch_script'], status_command=record['status_command'])
self.session.add(service)
self.session.commit()
results.append({'id': service.id, 'name': service.name, 'launch_script': service.launch_script,
'status_command': service.status_command, 'status': service.status})
return results
@expose(kind=STORE_READ)
def read(self):
results = []
for service in self.session.query(Service).all():
results.append({'id': service.id, 'name': service.name, 'launch_script': service.launch_script,
'status_command': service.status_command, 'status': service.status})
return results
@expose(kind=STORE_CUD)
def update(self, data):
results = []
for record in data:
service = self.session.query(Service).get(record['id'])
service.name = record['name']
service.launch_script = record['launch_script']
service.status_command = record['status_command']
results.append({'id': service.id, 'name': service.name, 'launch_script': service.launch_script,
'status_command': service.status_command, 'status': service.status})
self.session.commit()
return results
@expose(kind=STORE_CUD)
def destroy(self, data):
results = []
for service_id in data:
service = self.session.query(Service).get(service_id)
self.session.delete(service)
results.append({'id': service.id, 'name': service.name, 'launch_script': service.launch_script,
'status_command': service.status_command, 'status': service.status})
self.session.commit()
return [r['id'] for r in results]
@expose
def start(self, service_id):
service = self.session.query(Service).get(service_id)
return service.start()
@expose
def stop(self, service_id):
service = self.session.query(Service).get(service_id)
return service.stop()
def stop_all(self):
for service in self.session.query(Service).all():
if service.status:
service.stop()
def start_all(self):
for service in self.session.query(Service).all():
if not service.status:
service.start()
class Overview(Base):
def __init__(self):
self.session = Session()
@expose(kind=LOAD)
def load(self):
result = {'installed': 'installed' if self.is_installed() else 'installing',
'running_services': self.running_services(),
'updates': self.updates_count()}
return result
@expose
def updates_count(self):
with open(os.devnull, 'w') as devnull:
updates_count = int(subprocess.check_output(['chroot', chroottarget, '/bin/bash', '-c', 'emerge --update --deep @world --pretend --quiet | grep "U" | wc -l'], stdin=devnull, stderr=devnull, env={'PATH': env_path}))
return updates_count
@expose
def do_refresh(self):
with open(os.devnull, 'w') as devnull:
status = not subprocess.call(['chroot', chroottarget, '/bin/bash', '-c', 'emerge --sync --quiet'], stdin=devnull, stdout=devnull, stderr=devnull, env={'PATH': env_path})
if status:
return self.updates_count()
return status
@expose
def do_update(self):
with open(os.devnull, 'w') as devnull:
status = not subprocess.call(['chroot', chroottarget, '/bin/bash', '-c', 'emerge --update --deep @world --quiet'], stdin=devnull, stdout=devnull, stderr=devnull, env={'PATH': env_path})
return status
def is_installed(self):
return os.path.exists(installed)
def running_services(self):
return len([service for service in self.session.query(Service).all() if service.status == 1])
| bsd-3-clause |
halwai/cvxpy | cvxpy/atoms/elementwise/max_elemwise.py | 11 | 3324 | """
Copyright 2013 Steven Diamond
This file is part of CVXPY.
CVXPY is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CVXPY is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with CVXPY. If not, see <http://www.gnu.org/licenses/>.
"""
import sys
import cvxpy.utilities as u
import cvxpy.lin_ops.lin_utils as lu
from cvxpy.atoms.elementwise.elementwise import Elementwise
import numpy as np
if sys.version_info >= (3, 0):
from functools import reduce
class max_elemwise(Elementwise):
""" Elementwise maximum. """
def __init__(self, arg1, arg2, *args):
"""Requires at least 2 arguments.
"""
super(max_elemwise, self).__init__(arg1, arg2, *args)
@Elementwise.numpy_numeric
def numeric(self, values):
"""Returns the elementwise maximum.
"""
return reduce(np.maximum, values)
def sign_from_args(self):
"""Determins the sign of max_elemwise from the arguments' signs.
Reduces the list of argument signs according to the following rules:
POSITIVE, ANYTHING = POSITIVE
ZERO, UNKNOWN = POSITIVE
ZERO, ZERO = ZERO
ZERO, NEGATIVE = ZERO
UNKNOWN, NEGATIVE = UNKNOWN
NEGATIVE, NEGATIVE = NEGATIVE
Returns
-------
Sign
The Sign of the expression.
"""
arg_signs = [arg._dcp_attr.sign for arg in self.args]
if u.Sign.POSITIVE in arg_signs:
max_sign = u.Sign.POSITIVE
elif u.Sign.ZERO in arg_signs:
if u.Sign.UNKNOWN in arg_signs:
max_sign = u.Sign.POSITIVE
else:
max_sign = u.Sign.ZERO
elif u.Sign.UNKNOWN in arg_signs:
max_sign = u.Sign.UNKNOWN
else:
max_sign = u.Sign.NEGATIVE
return max_sign
def func_curvature(self):
"""The function's default curvature is convex.
"""
return u.Curvature.CONVEX
def monotonicity(self):
"""The function is increasing in each argument.
"""
return len(self.args)*[u.monotonicity.INCREASING]
@staticmethod
def graph_implementation(arg_objs, size, data=None):
"""Reduces the atom to an affine expression and list of constraints.
Parameters
----------
arg_objs : list
LinExpr for each argument.
size : tuple
The size of the resulting expression.
data :
Additional data required by the atom.
Returns
-------
tuple
(LinOp for objective, list of constraints)
"""
t = lu.create_var(size)
constraints = []
for obj in arg_objs:
# Promote obj.
if obj.size != size:
obj = lu.promote(obj, size)
constraints.append(lu.create_leq(obj, t))
return (t, constraints)
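# Illustrative usage (a sketch, not part of this module; assumes the Variable
# class shipped with the same cvxpy release):
#     x = Variable(3)
#     expr = max_elemwise(x, 0)  # elementwise max(x_i, 0); convex with POSITIVE sign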
| gpl-3.0 |
20uf/ansible | v1/ansible/runner/return_data.py | 133 | 2102 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from ansible import utils
class ReturnData(object):
''' internal return class for runner execute methods, not part of public API signature '''
__slots__ = [ 'result', 'comm_ok', 'host', 'diff' ]
def __init__(self, conn=None, host=None, result=None,
comm_ok=True, diff=dict()):
# which host is this ReturnData about?
if conn is not None:
self.host = conn.host
delegate = getattr(conn, 'delegate', None)
if delegate is not None:
self.host = delegate
else:
self.host = host
self.result = result
self.comm_ok = comm_ok
# if these values are set and used with --diff we can show
# changes made to particular files
self.diff = diff
if type(self.result) in [ str, unicode ]:
self.result = utils.parse_json(self.result, from_remote=True, no_exceptions=True)
if self.host is None:
raise Exception("host not set")
if type(self.result) != dict:
raise Exception("dictionary result expected")
def communicated_ok(self):
return self.comm_ok
def is_successful(self):
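        # Success means: communication with the host worked, the module did not
        # report 'failed', and either 'failed_when_result' (when present) is
        # falsy or the return code is 0.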
return self.comm_ok and (self.result.get('failed', False) == False) and ('failed_when_result' in self.result and [not self.result['failed_when_result']] or [self.result.get('rc',0) == 0])[0]
| gpl-3.0 |
carlessanagustin/ansible-playbooks | provision-ansible/library/proxmox.py | 85 | 16619 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: proxmox
short_description: management of instances in Proxmox VE cluster
description:
- allows you to create/delete/stop instances in Proxmox VE cluster
version_added: "2.0"
options:
api_host:
description:
- the host of the Proxmox VE cluster
required: true
api_user:
description:
- the user to authenticate with
required: true
api_password:
description:
- the password to authenticate with
      - you can use the PROXMOX_PASSWORD environment variable
default: null
required: false
vmid:
description:
- the instance id
default: null
required: true
validate_certs:
description:
- enable / disable https certificate verification
default: false
required: false
type: boolean
node:
description:
      - Proxmox VE node on which the new VM will be created
      - required only for C(state=present)
      - for other states it will be autodiscovered
default: null
required: false
password:
description:
- the instance root password
- required only for C(state=present)
default: null
required: false
hostname:
description:
- the instance hostname
- required only for C(state=present)
default: null
required: false
ostemplate:
description:
      - the template used to create the VM
- required only for C(state=present)
default: null
required: false
disk:
description:
- hard disk size in GB for instance
default: 3
required: false
cpus:
description:
      - number of allocated cpus for the instance
default: 1
required: false
memory:
description:
- memory size in MB for instance
default: 512
required: false
swap:
description:
- swap memory size in MB for instance
default: 0
required: false
netif:
description:
- specifies network interfaces for the container
default: null
required: false
type: string
ip_address:
description:
- specifies the address the container will be assigned
default: null
required: false
type: string
onboot:
description:
- specifies whether a VM will be started during system bootup
default: false
required: false
type: boolean
storage:
description:
- target storage
default: 'local'
required: false
type: string
cpuunits:
description:
- CPU weight for a VM
default: 1000
required: false
type: integer
nameserver:
description:
- sets DNS server IP address for a container
default: null
required: false
type: string
searchdomain:
description:
- sets DNS search domain for a container
default: null
required: false
type: string
timeout:
description:
- timeout for operations
default: 30
required: false
type: integer
force:
description:
- forcing operations
- can be used only with states C(present), C(stopped), C(restarted)
      - with C(state=present) the force option allows you to overwrite an existing container
      - with states C(stopped), C(restarted) it allows you to force stop the instance
default: false
required: false
type: boolean
state:
description:
- Indicate desired state of the instance
choices: ['present', 'started', 'absent', 'stopped', 'restarted']
default: present
notes:
  - Requires the proxmoxer and requests modules on the host. These modules can be installed with pip.
requirements: [ "proxmoxer", "requests" ]
author: "Sergei Antipov @UnderGreen"
'''
EXAMPLES = '''
# Create new container with minimal options
- proxmox: vmid=100 node='uk-mc02' api_user='root@pam' api_password='1q2w3e' api_host='node1' password='123456' hostname='example.org' ostemplate='local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
# Create new container with minimal options with force (it will overwrite an existing container)
- proxmox: vmid=100 node='uk-mc02' api_user='root@pam' api_password='1q2w3e' api_host='node1' password='123456' hostname='example.org' ostemplate='local:vztmpl/ubuntu-14.04-x86_64.tar.gz' force=yes
# Create new container with minimal options using the PROXMOX_PASSWORD environment variable (you should export it beforehand)
- proxmox: vmid=100 node='uk-mc02' api_user='root@pam' api_host='node1' password='123456' hostname='example.org' ostemplate='local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
# Start container
- proxmox: vmid=100 api_user='root@pam' api_password='1q2w3e' api_host='node1' state=started
# Stop container
- proxmox: vmid=100 api_user='root@pam' api_password='1q2w3e' api_host='node1' state=stopped
# Stop container with force
- proxmox: vmid=100 api_user='root@pam' api_password='1q2w3e' api_host='node1' force=yes state=stopped
# Restart container (a stopped or mounted container can't be restarted)
- proxmox: vmid=100 api_user='root@pam' api_password='1q2w3e' api_host='node1' state=restarted
# Remove container
- proxmox: vmid=100 api_user='root@pam' api_password='1q2w3e' api_host='node1' state=absent
'''
import os
import time
try:
from proxmoxer import ProxmoxAPI
HAS_PROXMOXER = True
except ImportError:
HAS_PROXMOXER = False
def get_instance(proxmox, vmid):
return [ vm for vm in proxmox.cluster.resources.get(type='vm') if vm['vmid'] == int(vmid) ]
def content_check(proxmox, node, ostemplate, storage):
return [ True for cnt in proxmox.nodes(node).storage(storage).content.get() if cnt['volid'] == ostemplate ]
def node_check(proxmox, node):
return [ True for nd in proxmox.nodes.get() if nd['node'] == node ]
def create_instance(module, proxmox, vmid, node, disk, storage, cpus, memory, swap, timeout, **kwargs):
proxmox_node = proxmox.nodes(node)
taskid = proxmox_node.openvz.create(vmid=vmid, storage=storage, memory=memory, swap=swap,
cpus=cpus, disk=disk, **kwargs)
while timeout:
if ( proxmox_node.tasks(taskid).status.get()['status'] == 'stopped'
and proxmox_node.tasks(taskid).status.get()['exitstatus'] == 'OK' ):
return True
timeout = timeout - 1
if timeout == 0:
module.fail_json(msg='Reached timeout while waiting for creating VM. Last line in task before timeout: %s'
% proxmox_node.tasks(taskid).log.get()[:1])
time.sleep(1)
return False
def start_instance(module, proxmox, vm, vmid, timeout):
taskid = proxmox.nodes(vm[0]['node']).openvz(vmid).status.start.post()
while timeout:
if ( proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped'
and proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK' ):
return True
timeout = timeout - 1
if timeout == 0:
module.fail_json(msg='Reached timeout while waiting for starting VM. Last line in task before timeout: %s'
% proxmox.nodes(vm[0]['node']).tasks(taskid).log.get()[:1])
time.sleep(1)
return False
def stop_instance(module, proxmox, vm, vmid, timeout, force):
if force:
taskid = proxmox.nodes(vm[0]['node']).openvz(vmid).status.shutdown.post(forceStop=1)
else:
taskid = proxmox.nodes(vm[0]['node']).openvz(vmid).status.shutdown.post()
while timeout:
if ( proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped'
and proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK' ):
return True
timeout = timeout - 1
if timeout == 0:
module.fail_json(msg='Reached timeout while waiting for stopping VM. Last line in task before timeout: %s'
                             % proxmox.nodes(vm[0]['node']).tasks(taskid).log.get()[:1])
time.sleep(1)
return False
def umount_instance(module, proxmox, vm, vmid, timeout):
taskid = proxmox.nodes(vm[0]['node']).openvz(vmid).status.umount.post()
while timeout:
if ( proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped'
and proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK' ):
return True
timeout = timeout - 1
if timeout == 0:
module.fail_json(msg='Reached timeout while waiting for unmounting VM. Last line in task before timeout: %s'
                             % proxmox.nodes(vm[0]['node']).tasks(taskid).log.get()[:1])
time.sleep(1)
return False
def main():
module = AnsibleModule(
argument_spec = dict(
api_host = dict(required=True),
api_user = dict(required=True),
api_password = dict(no_log=True),
vmid = dict(required=True),
validate_certs = dict(type='bool', choices=BOOLEANS, default='no'),
node = dict(),
password = dict(no_log=True),
hostname = dict(),
ostemplate = dict(),
disk = dict(type='int', default=3),
cpus = dict(type='int', default=1),
memory = dict(type='int', default=512),
swap = dict(type='int', default=0),
netif = dict(),
ip_address = dict(),
onboot = dict(type='bool', choices=BOOLEANS, default='no'),
storage = dict(default='local'),
cpuunits = dict(type='int', default=1000),
nameserver = dict(),
searchdomain = dict(),
timeout = dict(type='int', default=30),
force = dict(type='bool', choices=BOOLEANS, default='no'),
state = dict(default='present', choices=['present', 'absent', 'stopped', 'started', 'restarted']),
)
)
if not HAS_PROXMOXER:
module.fail_json(msg='proxmoxer required for this module')
state = module.params['state']
api_user = module.params['api_user']
api_host = module.params['api_host']
api_password = module.params['api_password']
vmid = module.params['vmid']
validate_certs = module.params['validate_certs']
node = module.params['node']
disk = module.params['disk']
cpus = module.params['cpus']
memory = module.params['memory']
swap = module.params['swap']
storage = module.params['storage']
timeout = module.params['timeout']
# If password not set get it from PROXMOX_PASSWORD env
if not api_password:
try:
api_password = os.environ['PROXMOX_PASSWORD']
except KeyError, e:
            module.fail_json(msg='You should set the api_password param or use the PROXMOX_PASSWORD environment variable')
try:
proxmox = ProxmoxAPI(api_host, user=api_user, password=api_password, verify_ssl=validate_certs)
except Exception, e:
module.fail_json(msg='authorization on proxmox cluster failed with exception: %s' % e)
if state == 'present':
try:
if get_instance(proxmox, vmid) and not module.params['force']:
module.exit_json(changed=False, msg="VM with vmid = %s is already exists" % vmid)
            elif not (node and module.params['hostname'] and module.params['password'] and module.params['ostemplate']):
module.fail_json(msg='node, hostname, password and ostemplate are mandatory for creating vm')
elif not node_check(proxmox, node):
module.fail_json(msg="node '%s' not exists in cluster" % node)
elif not content_check(proxmox, node, module.params['ostemplate'], storage):
module.fail_json(msg="ostemplate '%s' not exists on node %s and storage %s"
% (module.params['ostemplate'], node, storage))
create_instance(module, proxmox, vmid, node, disk, storage, cpus, memory, swap, timeout,
password = module.params['password'],
hostname = module.params['hostname'],
ostemplate = module.params['ostemplate'],
netif = module.params['netif'],
ip_address = module.params['ip_address'],
onboot = int(module.params['onboot']),
cpuunits = module.params['cpuunits'],
nameserver = module.params['nameserver'],
searchdomain = module.params['searchdomain'],
force = int(module.params['force']))
module.exit_json(changed=True, msg="deployed VM %s from template %s" % (vmid, module.params['ostemplate']))
except Exception, e:
module.fail_json(msg="creation of VM %s failed with exception: %s" % ( vmid, e ))
elif state == 'started':
try:
vm = get_instance(proxmox, vmid)
if not vm:
                module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid)
if proxmox.nodes(vm[0]['node']).openvz(vmid).status.current.get()['status'] == 'running':
module.exit_json(changed=False, msg="VM %s is already running" % vmid)
if start_instance(module, proxmox, vm, vmid, timeout):
module.exit_json(changed=True, msg="VM %s started" % vmid)
except Exception, e:
module.fail_json(msg="starting of VM %s failed with exception: %s" % ( vmid, e ))
elif state == 'stopped':
try:
vm = get_instance(proxmox, vmid)
if not vm:
                module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid)
if proxmox.nodes(vm[0]['node']).openvz(vmid).status.current.get()['status'] == 'mounted':
if module.params['force']:
if umount_instance(module, proxmox, vm, vmid, timeout):
module.exit_json(changed=True, msg="VM %s is shutting down" % vmid)
else:
module.exit_json(changed=False, msg=("VM %s is already shutdown, but mounted. "
"You can use force option to umount it.") % vmid)
if proxmox.nodes(vm[0]['node']).openvz(vmid).status.current.get()['status'] == 'stopped':
module.exit_json(changed=False, msg="VM %s is already shutdown" % vmid)
if stop_instance(module, proxmox, vm, vmid, timeout, force = module.params['force']):
module.exit_json(changed=True, msg="VM %s is shutting down" % vmid)
except Exception, e:
module.fail_json(msg="stopping of VM %s failed with exception: %s" % ( vmid, e ))
elif state == 'restarted':
try:
vm = get_instance(proxmox, vmid)
if not vm:
                module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid)
if ( proxmox.nodes(vm[0]['node']).openvz(vmid).status.current.get()['status'] == 'stopped'
or proxmox.nodes(vm[0]['node']).openvz(vmid).status.current.get()['status'] == 'mounted' ):
module.exit_json(changed=False, msg="VM %s is not running" % vmid)
if ( stop_instance(module, proxmox, vm, vmid, timeout, force = module.params['force']) and
start_instance(module, proxmox, vm, vmid, timeout) ):
module.exit_json(changed=True, msg="VM %s is restarted" % vmid)
except Exception, e:
module.fail_json(msg="restarting of VM %s failed with exception: %s" % ( vmid, e ))
elif state == 'absent':
try:
vm = get_instance(proxmox, vmid)
if not vm:
module.exit_json(changed=False, msg="VM %s does not exist" % vmid)
if proxmox.nodes(vm[0]['node']).openvz(vmid).status.current.get()['status'] == 'running':
module.exit_json(changed=False, msg="VM %s is running. Stop it before deletion." % vmid)
if proxmox.nodes(vm[0]['node']).openvz(vmid).status.current.get()['status'] == 'mounted':
module.exit_json(changed=False, msg="VM %s is mounted. Stop it with force option before deletion." % vmid)
taskid = proxmox.nodes(vm[0]['node']).openvz.delete(vmid)
while timeout:
if ( proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped'
and proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK' ):
module.exit_json(changed=True, msg="VM %s removed" % vmid)
timeout = timeout - 1
if timeout == 0:
module.fail_json(msg='Reached timeout while waiting for removing VM. Last line in task before timeout: %s'
                                % proxmox.nodes(vm[0]['node']).tasks(taskid).log.get()[:1])
time.sleep(1)
except Exception, e:
module.fail_json(msg="deletion of VM %s failed with exception: %s" % ( vmid, e ))
# import module snippets
from ansible.module_utils.basic import *
main()
| mit |
poojavade/Genomics_Docker | Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/numpy/f2py/crackfortran.py | 44 | 126845 | #!/usr/bin/env python
"""
crackfortran --- read fortran (77,90) code and extract declaration information.
Copyright 1999-2004 Pearu Peterson all rights reserved,
Pearu Peterson <[email protected]>
Permission to use, modify, and distribute this software is given under the
terms of the NumPy License.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Date: 2005/09/27 07:13:49 $
Pearu Peterson
Usage of crackfortran:
======================
Command line keys: -quiet,-verbose,-fix,-f77,-f90,-show,-h <pyffilename>
-m <module name for f77 routines>,--ignore-contains
Functions: crackfortran, crack2fortran
The following Fortran statements/constructions are supported
(or will be if needed):
block data,byte,call,character,common,complex,contains,data,
dimension,double complex,double precision,end,external,function,
implicit,integer,intent,interface,intrinsic,
logical,module,optional,parameter,private,public,
program,real,(sequence?),subroutine,type,use,virtual,
include,pythonmodule
Note: 'virtual' is mapped to 'dimension'.
Note: 'implicit integer (z) static (z)' is 'implicit static (z)' (this is a minor bug).
Note: code after 'contains' will be ignored until its scope ends.
Note: 'common' statement is extended: dimensions are moved to variable definitions
Note: f2py directive: <commentchar>f2py<line> is read as <line>
Note: pythonmodule is introduced to represent Python module
Usage:
`postlist=crackfortran(files,funcs)`
`postlist` contains declaration information read from the list of files `files`.
`crack2fortran(postlist)` returns a fortran code to be saved to pyf-file
`postlist` has the following structure:
*** it is a list of dictionaries containing `blocks':
B = {'block','body','vars','parent_block'[,'name','prefix','args','result',
'implicit','externals','interfaced','common','sortvars',
'commonvars','note']}
B['block'] = 'interface' | 'function' | 'subroutine' | 'module' |
'program' | 'block data' | 'type' | 'pythonmodule'
B['body'] --- list containing `subblocks' with the same structure as `blocks'
B['parent_block'] --- dictionary of a parent block:
C['body'][<index>]['parent_block'] is C
B['vars'] --- dictionary of variable definitions
B['sortvars'] --- dictionary of variable definitions sorted by dependence (independent first)
B['name'] --- name of the block (not if B['block']=='interface')
B['prefix'] --- prefix string (only if B['block']=='function')
B['args'] --- list of argument names if B['block']== 'function' | 'subroutine'
B['result'] --- name of the return value (only if B['block']=='function')
B['implicit'] --- dictionary {'a':<variable definition>,'b':...} | None
B['externals'] --- list of variables being external
B['interfaced'] --- list of variables being external and defined
B['common'] --- dictionary of common blocks (list of objects)
B['commonvars'] --- list of variables used in common blocks (dimensions are moved to variable definitions)
B['from'] --- string showing the 'parents' of the current block
B['use'] --- dictionary of modules used in current block:
{<modulename>:{['only':<0|1>],['map':{<local_name1>:<use_name1>,...}]}}
B['note'] --- list of LaTeX comments on the block
B['f2pyenhancements'] --- optional dictionary
{'threadsafe':'','fortranname':<name>,
'callstatement':<C-expr>|<multi-line block>,
'callprotoargument':<C-expr-list>,
'usercode':<multi-line block>|<list of multi-line blocks>,
'pymethoddef:<multi-line block>'
}
B['entry'] --- dictionary {entryname:argslist,..}
B['varnames'] --- list of variable names given in the order of reading the
Fortran code, useful for derived types.
B['saved_interface'] --- a string of scanned routine signature, defines explicit interface
*** Variable definition is a dictionary
D = B['vars'][<variable name>] =
{'typespec'[,'attrspec','kindselector','charselector','=','typename']}
D['typespec'] = 'byte' | 'character' | 'complex' | 'double complex' |
'double precision' | 'integer' | 'logical' | 'real' | 'type'
D['attrspec'] --- list of attributes (e.g. 'dimension(<arrayspec>)',
'external','intent(in|out|inout|hide|c|callback|cache|aligned4|aligned8|aligned16)',
'optional','required', etc)
K = D['kindselector'] = {['*','kind']} (only if D['typespec'] =
'complex' | 'integer' | 'logical' | 'real' )
C = D['charselector'] = {['*','len','kind']}
(only if D['typespec']=='character')
D['='] --- initialization expression string
D['typename'] --- name of the type if D['typespec']=='type'
D['dimension'] --- list of dimension bounds
D['intent'] --- list of intent specifications
D['depend'] --- list of variable names on which current variable depends on
D['check'] --- list of C-expressions; if C-expr returns zero, exception is raised
D['note'] --- list of LaTeX comments on the variable
*** Meaning of kind/char selectors (few examples):
D['typespec>']*K['*']
D['typespec'](kind=K['kind'])
character*C['*']
character(len=C['len'],kind=C['kind'])
(see also fortran type declaration statement formats below)
Fortran 90 type declaration statement format (F77 is subset of F90)
====================================================================
(Main source: IBM XL Fortran 5.1 Language Reference Manual)
type declaration = <typespec> [[<attrspec>]::] <entitydecl>
<typespec> = byte |
character[<charselector>] |
complex[<kindselector>] |
double complex |
double precision |
integer[<kindselector>] |
logical[<kindselector>] |
real[<kindselector>] |
type(<typename>)
<charselector> = * <charlen> |
([len=]<len>[,[kind=]<kind>]) |
(kind=<kind>[,len=<len>])
<kindselector> = * <intlen> |
([kind=]<kind>)
<attrspec> = comma separated list of attributes.
Only the following attributes are used in
building up the interface:
external
(parameter --- affects '=' key)
optional
intent
Other attributes are ignored.
<intentspec> = in | out | inout
<arrayspec> = comma separated list of dimension bounds.
<entitydecl> = <name> [[*<charlen>][(<arrayspec>)] | [(<arrayspec>)]*<charlen>]
[/<init_expr>/ | =<init_expr>] [,<entitydecl>]
In addition, the following attributes are used: check,depend,note
TODO:
* Apply 'parameter' attribute (e.g. 'integer parameter :: i=2' 'real x(i)'
-> 'real x(2)')
The above may be solved by creating appropriate preprocessor program, for example.
"""
from __future__ import division, absolute_import, print_function
import sys
import string
import fileinput
import re
import os
import copy
import platform
from . import __version__
# The environment provided by auxfuncs.py is needed for some calls to eval.
# As the needed functions cannot be determined by static inspection of the
# code, it is safest to use import * pending a major refactoring of f2py.
from .auxfuncs import *
f2py_version = __version__.version
# Global flags:
strictf77 = 1 # Ignore `!' comments unless line[0]=='!'
sourcecodeform = 'fix' # 'fix','free'
quiet = 0 # Be verbose if 0 (Obsolete: not used any more)
verbose = 1 # Be quiet if 0, extra verbose if > 1.
tabchar = 4 * ' '
pyffilename = ''
f77modulename = ''
skipemptyends = 0 # for old F77 programs without 'program' statement
ignorecontains = 1
dolowercase = 1
debug = []
# Global variables
beginpattern = ''
currentfilename = ''
expectbegin = 1
f90modulevars = {}
filepositiontext = ''
gotnextfile = 1
groupcache = None
groupcounter = 0
grouplist = {groupcounter: []}
groupname = ''
include_paths = []
neededmodule = -1
onlyfuncs = []
previous_context = None
skipblocksuntil = -1
skipfuncs = []
skipfunctions = []
usermodules = []
def reset_global_f2py_vars():
global groupcounter, grouplist, neededmodule, expectbegin
global skipblocksuntil, usermodules, f90modulevars, gotnextfile
global filepositiontext, currentfilename, skipfunctions, skipfuncs
global onlyfuncs, include_paths, previous_context
global strictf77, sourcecodeform, quiet, verbose, tabchar, pyffilename
global f77modulename, skipemptyends, ignorecontains, dolowercase, debug
# flags
strictf77 = 1
sourcecodeform = 'fix'
quiet = 0
verbose = 1
tabchar = 4 * ' '
pyffilename = ''
f77modulename = ''
skipemptyends = 0
ignorecontains = 1
dolowercase = 1
debug = []
# variables
groupcounter = 0
grouplist = {groupcounter: []}
neededmodule = -1
expectbegin = 1
skipblocksuntil = -1
usermodules = []
f90modulevars = {}
gotnextfile = 1
filepositiontext = ''
currentfilename = ''
skipfunctions = []
skipfuncs = []
onlyfuncs = []
include_paths = []
previous_context = None
def outmess(line, flag=1):
global filepositiontext
if not verbose:
return
if not quiet:
if flag:
sys.stdout.write(filepositiontext)
sys.stdout.write(line)
re._MAXCACHE = 50
defaultimplicitrules = {}
for c in "abcdefghopqrstuvwxyz$_":
defaultimplicitrules[c] = {'typespec': 'real'}
for c in "ijklmn":
defaultimplicitrules[c] = {'typespec': 'integer'}
del c
badnames = {}
invbadnames = {}
for n in ['int', 'double', 'float', 'char', 'short', 'long', 'void', 'case', 'while',
'return', 'signed', 'unsigned', 'if', 'for', 'typedef', 'sizeof', 'union',
'struct', 'static', 'register', 'new', 'break', 'do', 'goto', 'switch',
'continue', 'else', 'inline', 'extern', 'delete', 'const', 'auto',
'len', 'rank', 'shape', 'index', 'slen', 'size', '_i',
'max', 'min',
'flen', 'fshape',
'string', 'complex_double', 'float_double', 'stdin', 'stderr', 'stdout',
'type', 'default']:
badnames[n] = n + '_bn'
invbadnames[n + '_bn'] = n
def rmbadname1(name):
if name in badnames:
errmess('rmbadname1: Replacing "%s" with "%s".\n' %
(name, badnames[name]))
return badnames[name]
return name
def rmbadname(names):
return [rmbadname1(_m) for _m in names]
def undo_rmbadname1(name):
if name in invbadnames:
errmess('undo_rmbadname1: Replacing "%s" with "%s".\n'
% (name, invbadnames[name]))
return invbadnames[name]
return name
def undo_rmbadname(names):
return [undo_rmbadname1(_m) for _m in names]
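# Illustrative effect of the name maps above: Fortran names that collide with C
# keywords or f2py helpers are mangled on the way in and restored on the way
# out, e.g.
#     rmbadname1('int') -> 'int_bn'
#     undo_rmbadname1('int_bn') -> 'int'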
def getextension(name):
i = name.rfind('.')
if i == -1:
return ''
if '\\' in name[i:]:
return ''
if '/' in name[i:]:
return ''
return name[i + 1:]
is_f_file = re.compile(r'.*[.](for|ftn|f77|f)\Z', re.I).match
_has_f_header = re.compile(r'-[*]-\s*fortran\s*-[*]-', re.I).search
_has_f90_header = re.compile(r'-[*]-\s*f90\s*-[*]-', re.I).search
_has_fix_header = re.compile(r'-[*]-\s*fix\s*-[*]-', re.I).search
_free_f90_start = re.compile(r'[^c*]\s*[^\s\d\t]', re.I).match
def is_free_format(file):
"""Check if file is in free format Fortran."""
# f90 allows both fixed and free format, assuming fixed unless
# signs of free format are detected.
result = 0
f = open(file, 'r')
line = f.readline()
n = 15 # the number of non-comment lines to scan for hints
if _has_f_header(line):
n = 0
elif _has_f90_header(line):
n = 0
result = 1
while n > 0 and line:
if line[0] != '!' and line.strip():
n -= 1
if (line[0] != '\t' and _free_f90_start(line[:5])) or line[-2:-1] == '&':
result = 1
break
line = f.readline()
f.close()
return result
# Read fortran (77,90) code
def readfortrancode(ffile, dowithline=show, istop=1):
"""
Read fortran codes from files and
1) Get rid of comments, line continuations, and empty lines; lower cases.
2) Call dowithline(line) on every line.
3) Recursively call itself when statement \"include '<filename>'\" is met.
"""
global gotnextfile, filepositiontext, currentfilename, sourcecodeform, strictf77
global beginpattern, quiet, verbose, dolowercase, include_paths
if not istop:
saveglobals = gotnextfile, filepositiontext, currentfilename, sourcecodeform, strictf77,\
beginpattern, quiet, verbose, dolowercase
if ffile == []:
return
localdolowercase = dolowercase
cont = 0
finalline = ''
ll = ''
commentline = re.compile(
r'(?P<line>([^"]*["][^"]*["][^"!]*|[^\']*\'[^\']*\'[^\'!]*|[^!\'"]*))!{1}(?P<rest>.*)')
includeline = re.compile(
r'\s*include\s*(\'|")(?P<name>[^\'"]*)(\'|")', re.I)
cont1 = re.compile(r'(?P<line>.*)&\s*\Z')
cont2 = re.compile(r'(\s*&|)(?P<line>.*)')
mline_mark = re.compile(r".*?'''")
if istop:
dowithline('', -1)
ll, l1 = '', ''
spacedigits = [' '] + [str(_m) for _m in range(10)]
filepositiontext = ''
fin = fileinput.FileInput(ffile)
while True:
l = fin.readline()
if not l:
break
if fin.isfirstline():
filepositiontext = ''
currentfilename = fin.filename()
gotnextfile = 1
l1 = l
strictf77 = 0
sourcecodeform = 'fix'
ext = os.path.splitext(currentfilename)[1]
if is_f_file(currentfilename) and \
not (_has_f90_header(l) or _has_fix_header(l)):
strictf77 = 1
elif is_free_format(currentfilename) and not _has_fix_header(l):
sourcecodeform = 'free'
if strictf77:
beginpattern = beginpattern77
else:
beginpattern = beginpattern90
outmess('\tReading file %s (format:%s%s)\n'
% (repr(currentfilename), sourcecodeform,
strictf77 and ',strict' or ''))
l = l.expandtabs().replace('\xa0', ' ')
# Get rid of newline characters
while not l == '':
if l[-1] not in "\n\r\f":
break
l = l[:-1]
if not strictf77:
r = commentline.match(l)
if r:
l = r.group('line') + ' ' # Strip comments starting with `!'
rl = r.group('rest')
if rl[:4].lower() == 'f2py': # f2py directive
l = l + 4 * ' '
r = commentline.match(rl[4:])
if r:
l = l + r.group('line')
else:
l = l + rl[4:]
if l.strip() == '': # Skip empty line
cont = 0
continue
if sourcecodeform == 'fix':
if l[0] in ['*', 'c', '!', 'C', '#']:
if l[1:5].lower() == 'f2py': # f2py directive
l = ' ' + l[5:]
else: # Skip comment line
cont = 0
continue
elif strictf77:
if len(l) > 72:
l = l[:72]
if not (l[0] in spacedigits):
raise Exception('readfortrancode: Found non-(space,digit) char '
'in the first column.\n\tAre you sure that '
'this code is in fix form?\n\tline=%s' % repr(l))
if (not cont or strictf77) and (len(l) > 5 and not l[5] == ' '):
# Continuation of a previous line
ll = ll + l[6:]
finalline = ''
origfinalline = ''
else:
if not strictf77:
# F90 continuation
r = cont1.match(l)
if r:
l = r.group('line') # Continuation follows ..
if cont:
ll = ll + cont2.match(l).group('line')
finalline = ''
origfinalline = ''
else:
# clean up line beginning from possible digits.
l = ' ' + l[5:]
if localdolowercase:
finalline = ll.lower()
else:
finalline = ll
origfinalline = ll
ll = l
cont = (r is not None)
else:
# clean up line beginning from possible digits.
l = ' ' + l[5:]
if localdolowercase:
finalline = ll.lower()
else:
finalline = ll
origfinalline = ll
ll = l
elif sourcecodeform == 'free':
if not cont and ext == '.pyf' and mline_mark.match(l):
l = l + '\n'
while True:
lc = fin.readline()
if not lc:
errmess(
'Unexpected end of file when reading multiline\n')
break
l = l + lc
if mline_mark.match(lc):
break
l = l.rstrip()
r = cont1.match(l)
if r:
l = r.group('line') # Continuation follows ..
if cont:
ll = ll + cont2.match(l).group('line')
finalline = ''
origfinalline = ''
else:
if localdolowercase:
finalline = ll.lower()
else:
finalline = ll
origfinalline = ll
ll = l
cont = (r is not None)
else:
raise ValueError(
"Flag sourcecodeform must be either 'fix' or 'free': %s" % repr(sourcecodeform))
filepositiontext = 'Line #%d in %s:"%s"\n\t' % (
fin.filelineno() - 1, currentfilename, l1)
m = includeline.match(origfinalline)
if m:
fn = m.group('name')
if os.path.isfile(fn):
readfortrancode(fn, dowithline=dowithline, istop=0)
else:
include_dirs = [
os.path.dirname(currentfilename)] + include_paths
foundfile = 0
for inc_dir in include_dirs:
fn1 = os.path.join(inc_dir, fn)
if os.path.isfile(fn1):
foundfile = 1
readfortrancode(fn1, dowithline=dowithline, istop=0)
break
if not foundfile:
outmess('readfortrancode: could not find include file %s in %s. Ignoring.\n' % (
repr(fn), os.pathsep.join(include_dirs)))
else:
dowithline(finalline)
l1 = ll
if localdolowercase:
finalline = ll.lower()
else:
finalline = ll
origfinalline = ll
filepositiontext = 'Line #%d in %s:"%s"\n\t' % (
fin.filelineno() - 1, currentfilename, l1)
m = includeline.match(origfinalline)
if m:
fn = m.group('name')
if os.path.isfile(fn):
readfortrancode(fn, dowithline=dowithline, istop=0)
else:
include_dirs = [os.path.dirname(currentfilename)] + include_paths
foundfile = 0
for inc_dir in include_dirs:
fn1 = os.path.join(inc_dir, fn)
if os.path.isfile(fn1):
foundfile = 1
readfortrancode(fn1, dowithline=dowithline, istop=0)
break
if not foundfile:
outmess('readfortrancode: could not find include file %s in %s. Ignoring.\n' % (
repr(fn), os.pathsep.join(include_dirs)))
else:
dowithline(finalline)
filepositiontext = ''
fin.close()
if istop:
dowithline('', 1)
else:
gotnextfile, filepositiontext, currentfilename, sourcecodeform, strictf77,\
beginpattern, quiet, verbose, dolowercase = saveglobals
# Crack line
beforethisafter = r'\s*(?P<before>%s(?=\s*(\b(%s)\b)))' + \
r'\s*(?P<this>(\b(%s)\b))' + \
r'\s*(?P<after>%s)\s*\Z'
##
fortrantypes = 'character|logical|integer|real|complex|double\s*(precision\s*(complex|)|complex)|type(?=\s*\([\w\s,=(*)]*\))|byte'
typespattern = re.compile(
beforethisafter % ('', fortrantypes, fortrantypes, '.*'), re.I), 'type'
typespattern4implicit = re.compile(beforethisafter % (
'', fortrantypes + '|static|automatic|undefined', fortrantypes + '|static|automatic|undefined', '.*'), re.I)
#
functionpattern = re.compile(beforethisafter % (
'([a-z]+[\w\s(=*+-/)]*?|)', 'function', 'function', '.*'), re.I), 'begin'
subroutinepattern = re.compile(beforethisafter % (
'[a-z\s]*?', 'subroutine', 'subroutine', '.*'), re.I), 'begin'
# modulepattern=re.compile(beforethisafter%('[a-z\s]*?','module','module','.*'),re.I),'begin'
#
groupbegins77 = r'program|block\s*data'
beginpattern77 = re.compile(
beforethisafter % ('', groupbegins77, groupbegins77, '.*'), re.I), 'begin'
groupbegins90 = groupbegins77 + \
r'|module(?!\s*procedure)|python\s*module|interface|type(?!\s*\()'
beginpattern90 = re.compile(
beforethisafter % ('', groupbegins90, groupbegins90, '.*'), re.I), 'begin'
groupends = r'end|endprogram|endblockdata|endmodule|endpythonmodule|endinterface'
endpattern = re.compile(
beforethisafter % ('', groupends, groupends, '[\w\s]*'), re.I), 'end'
# endifs='end\s*(if|do|where|select|while|forall)'
endifs = '(end\s*(if|do|where|select|while|forall))|(module\s*procedure)'
endifpattern = re.compile(
beforethisafter % ('[\w]*?', endifs, endifs, '[\w\s]*'), re.I), 'endif'
#
implicitpattern = re.compile(
beforethisafter % ('', 'implicit', 'implicit', '.*'), re.I), 'implicit'
dimensionpattern = re.compile(beforethisafter % (
'', 'dimension|virtual', 'dimension|virtual', '.*'), re.I), 'dimension'
externalpattern = re.compile(
beforethisafter % ('', 'external', 'external', '.*'), re.I), 'external'
optionalpattern = re.compile(
beforethisafter % ('', 'optional', 'optional', '.*'), re.I), 'optional'
requiredpattern = re.compile(
beforethisafter % ('', 'required', 'required', '.*'), re.I), 'required'
publicpattern = re.compile(
beforethisafter % ('', 'public', 'public', '.*'), re.I), 'public'
privatepattern = re.compile(
beforethisafter % ('', 'private', 'private', '.*'), re.I), 'private'
intrisicpattern = re.compile(
    beforethisafter % ('', 'intrinsic', 'intrinsic', '.*'), re.I), 'intrisic'
intentpattern = re.compile(beforethisafter % (
'', 'intent|depend|note|check', 'intent|depend|note|check', '\s*\(.*?\).*'), re.I), 'intent'
parameterpattern = re.compile(
beforethisafter % ('', 'parameter', 'parameter', '\s*\(.*'), re.I), 'parameter'
datapattern = re.compile(
beforethisafter % ('', 'data', 'data', '.*'), re.I), 'data'
callpattern = re.compile(
beforethisafter % ('', 'call', 'call', '.*'), re.I), 'call'
entrypattern = re.compile(
beforethisafter % ('', 'entry', 'entry', '.*'), re.I), 'entry'
callfunpattern = re.compile(
beforethisafter % ('', 'callfun', 'callfun', '.*'), re.I), 'callfun'
commonpattern = re.compile(
beforethisafter % ('', 'common', 'common', '.*'), re.I), 'common'
usepattern = re.compile(
beforethisafter % ('', 'use', 'use', '.*'), re.I), 'use'
containspattern = re.compile(
beforethisafter % ('', 'contains', 'contains', ''), re.I), 'contains'
formatpattern = re.compile(
beforethisafter % ('', 'format', 'format', '.*'), re.I), 'format'
# Non-fortran and f2py-specific statements
f2pyenhancementspattern = re.compile(beforethisafter % ('', 'threadsafe|fortranname|callstatement|callprotoargument|usercode|pymethoddef',
'threadsafe|fortranname|callstatement|callprotoargument|usercode|pymethoddef', '.*'), re.I | re.S), 'f2pyenhancements'
multilinepattern = re.compile(
r"\s*(?P<before>''')(?P<this>.*?)(?P<after>''')\s*\Z", re.S), 'multiline'
##
def _simplifyargs(argsline):
a = []
for n in markoutercomma(argsline).split('@,@'):
for r in '(),':
n = n.replace(r, '_')
a.append(n)
return ','.join(a)
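# Illustrative behaviour of _simplifyargs (based on the logic above): call
# syntax inside an argument list is flattened into plain identifiers, e.g.
#     _simplifyargs('a,b(1,2)') -> 'a,b_1_2_'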
crackline_re_1 = re.compile(r'\s*(?P<result>\b[a-z]+[\w]*\b)\s*[=].*', re.I)
def crackline(line, reset=0):
"""
reset=-1 --- initialize
reset=0 --- crack the line
    reset=1 --- final check if mismatch of blocks occurred
Cracked data is saved in grouplist[0].
"""
global beginpattern, groupcounter, groupname, groupcache, grouplist
global filepositiontext, currentfilename, neededmodule, expectbegin
global skipblocksuntil, skipemptyends, previous_context, gotnextfile
if ';' in line and not (f2pyenhancementspattern[0].match(line) or
multilinepattern[0].match(line)):
for l in line.split(';'):
# XXX: non-zero reset values need testing
assert reset == 0, repr(reset)
crackline(l, reset)
return
if reset < 0:
groupcounter = 0
groupname = {groupcounter: ''}
groupcache = {groupcounter: {}}
grouplist = {groupcounter: []}
groupcache[groupcounter]['body'] = []
groupcache[groupcounter]['vars'] = {}
groupcache[groupcounter]['block'] = ''
groupcache[groupcounter]['name'] = ''
neededmodule = -1
skipblocksuntil = -1
return
if reset > 0:
fl = 0
if f77modulename and neededmodule == groupcounter:
fl = 2
while groupcounter > fl:
outmess('crackline: groupcounter=%s groupname=%s\n' %
(repr(groupcounter), repr(groupname)))
outmess(
'crackline: Mismatch of blocks encountered. Trying to fix it by assuming "end" statement.\n')
grouplist[groupcounter - 1].append(groupcache[groupcounter])
grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter]
del grouplist[groupcounter]
groupcounter = groupcounter - 1
if f77modulename and neededmodule == groupcounter:
grouplist[groupcounter - 1].append(groupcache[groupcounter])
grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter]
del grouplist[groupcounter]
groupcounter = groupcounter - 1 # end interface
grouplist[groupcounter - 1].append(groupcache[groupcounter])
grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter]
del grouplist[groupcounter]
groupcounter = groupcounter - 1 # end module
neededmodule = -1
return
if line == '':
return
flag = 0
for pat in [dimensionpattern, externalpattern, intentpattern, optionalpattern,
requiredpattern,
parameterpattern, datapattern, publicpattern, privatepattern,
intrisicpattern,
endifpattern, endpattern,
formatpattern,
beginpattern, functionpattern, subroutinepattern,
implicitpattern, typespattern, commonpattern,
callpattern, usepattern, containspattern,
entrypattern,
f2pyenhancementspattern,
multilinepattern
]:
m = pat[0].match(line)
if m:
break
flag = flag + 1
if not m:
re_1 = crackline_re_1
if 0 <= skipblocksuntil <= groupcounter:
return
if 'externals' in groupcache[groupcounter]:
for name in groupcache[groupcounter]['externals']:
if name in invbadnames:
name = invbadnames[name]
if 'interfaced' in groupcache[groupcounter] and name in groupcache[groupcounter]['interfaced']:
continue
m1 = re.match(
r'(?P<before>[^"]*)\b%s\b\s*@\(@(?P<args>[^@]*)@\)@.*\Z' % name, markouterparen(line), re.I)
if m1:
m2 = re_1.match(m1.group('before'))
a = _simplifyargs(m1.group('args'))
if m2:
line = 'callfun %s(%s) result (%s)' % (
name, a, m2.group('result'))
else:
line = 'callfun %s(%s)' % (name, a)
m = callfunpattern[0].match(line)
if not m:
outmess(
'crackline: could not resolve function call for line=%s.\n' % repr(line))
return
analyzeline(m, 'callfun', line)
return
if verbose > 1 or (verbose == 1 and currentfilename.lower().endswith('.pyf')):
previous_context = None
outmess('crackline:%d: No pattern for line\n' % (groupcounter))
return
elif pat[1] == 'end':
if 0 <= skipblocksuntil < groupcounter:
groupcounter = groupcounter - 1
if skipblocksuntil <= groupcounter:
return
if groupcounter <= 0:
raise Exception('crackline: groupcounter(=%s) is nonpositive. '
'Check the blocks.'
% (groupcounter))
m1 = beginpattern[0].match((line))
if (m1) and (not m1.group('this') == groupname[groupcounter]):
raise Exception('crackline: End group %s does not match with '
'previous Begin group %s\n\t%s' %
(repr(m1.group('this')), repr(groupname[groupcounter]),
filepositiontext)
)
if skipblocksuntil == groupcounter:
skipblocksuntil = -1
grouplist[groupcounter - 1].append(groupcache[groupcounter])
grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter]
del grouplist[groupcounter]
groupcounter = groupcounter - 1
if not skipemptyends:
expectbegin = 1
elif pat[1] == 'begin':
if 0 <= skipblocksuntil <= groupcounter:
groupcounter = groupcounter + 1
return
gotnextfile = 0
analyzeline(m, pat[1], line)
expectbegin = 0
elif pat[1] == 'endif':
pass
elif pat[1] == 'contains':
if ignorecontains:
return
if 0 <= skipblocksuntil <= groupcounter:
return
skipblocksuntil = groupcounter
else:
if 0 <= skipblocksuntil <= groupcounter:
return
analyzeline(m, pat[1], line)
def markouterparen(line):
l = ''
f = 0
for c in line:
if c == '(':
f = f + 1
if f == 1:
l = l + '@(@'
continue
elif c == ')':
f = f - 1
if f == 0:
l = l + '@)@'
continue
l = l + c
return l
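# Illustrative behaviour of markouterparen (based on the logic above): only the
# outermost parentheses are marked, e.g.
#     markouterparen('f(x(y))') -> 'f@(@x(y)@)@'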
def markoutercomma(line, comma=','):
l = ''
f = 0
cc = ''
for c in line:
if (not cc or cc == ')') and c == '(':
f = f + 1
cc = ')'
elif not cc and c == '\'' and (not l or l[-1] != '\\'):
f = f + 1
cc = '\''
elif c == cc:
f = f - 1
if f == 0:
cc = ''
elif c == comma and f == 0:
l = l + '@' + comma + '@'
continue
l = l + c
assert not f, repr((f, line, l, cc))
return l
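# Illustrative behaviour of markoutercomma (based on the logic above): commas at
# nesting depth zero are marked so they can be split on safely, e.g.
#     markoutercomma('a(1,2),b') -> 'a(1,2)@,@b'
#     markoutercomma('a(1,2),b').split('@,@') -> ['a(1,2)', 'b']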
def unmarkouterparen(line):
r = line.replace('@(@', '(').replace('@)@', ')')
return r
def appenddecl(decl, decl2, force=1):
if not decl:
decl = {}
if not decl2:
return decl
if decl is decl2:
return decl
for k in list(decl2.keys()):
if k == 'typespec':
if force or k not in decl:
decl[k] = decl2[k]
elif k == 'attrspec':
for l in decl2[k]:
decl = setattrspec(decl, l, force)
elif k == 'kindselector':
decl = setkindselector(decl, decl2[k], force)
elif k == 'charselector':
decl = setcharselector(decl, decl2[k], force)
elif k in ['=', 'typename']:
if force or k not in decl:
decl[k] = decl2[k]
elif k == 'note':
pass
elif k in ['intent', 'check', 'dimension', 'optional', 'required']:
errmess('appenddecl: "%s" not implemented.\n' % k)
else:
raise Exception('appenddecl: Unknown variable definition key:' +
str(k))
return decl
selectpattern = re.compile(
r'\s*(?P<this>(@\(@.*?@\)@|[*][\d*]+|[*]\s*@\(@.*?@\)@|))(?P<after>.*)\Z', re.I)
nameargspattern = re.compile(
r'\s*(?P<name>\b[\w$]+\b)\s*(@\(@\s*(?P<args>[\w\s,]*)\s*@\)@|)\s*((result(\s*@\(@\s*(?P<result>\b[\w$]+\b)\s*@\)@|))|(bind\s*@\(@\s*(?P<bind>.*)\s*@\)@))*\s*\Z', re.I)
callnameargspattern = re.compile(
r'\s*(?P<name>\b[\w$]+\b)\s*@\(@\s*(?P<args>.*)\s*@\)@\s*\Z', re.I)
real16pattern = re.compile(
r'([-+]?(?:\d+(?:\.\d*)?|\d*\.\d+))[dD]((?:[-+]?\d+)?)')
real8pattern = re.compile(
r'([-+]?((?:\d+(?:\.\d*)?|\d*\.\d+))[eE]((?:[-+]?\d+)?)|(\d+\.\d*))')
_intentcallbackpattern = re.compile(r'intent\s*\(.*?\bcallback\b', re.I)
def _is_intent_callback(vdecl):
for a in vdecl.get('attrspec', []):
if _intentcallbackpattern.match(a):
return 1
return 0
def _resolvenameargspattern(line):
line = markouterparen(line)
m1 = nameargspattern.match(line)
if m1:
return m1.group('name'), m1.group('args'), m1.group('result'), m1.group('bind')
m1 = callnameargspattern.match(line)
if m1:
return m1.group('name'), m1.group('args'), None, None
return None, [], None, None
def analyzeline(m, case, line):
global groupcounter, groupname, groupcache, grouplist, filepositiontext
global currentfilename, f77modulename, neededinterface, neededmodule
global expectbegin, gotnextfile, previous_context
block = m.group('this')
if case != 'multiline':
previous_context = None
if expectbegin and case not in ['begin', 'call', 'callfun', 'type'] \
and not skipemptyends and groupcounter < 1:
newname = os.path.basename(currentfilename).split('.')[0]
outmess(
'analyzeline: no group yet. Creating program group with name "%s".\n' % newname)
gotnextfile = 0
groupcounter = groupcounter + 1
groupname[groupcounter] = 'program'
groupcache[groupcounter] = {}
grouplist[groupcounter] = []
groupcache[groupcounter]['body'] = []
groupcache[groupcounter]['vars'] = {}
groupcache[groupcounter]['block'] = 'program'
groupcache[groupcounter]['name'] = newname
groupcache[groupcounter]['from'] = 'fromsky'
expectbegin = 0
if case in ['begin', 'call', 'callfun']:
# Crack line => block,name,args,result
block = block.lower()
if re.match(r'block\s*data', block, re.I):
block = 'block data'
if re.match(r'python\s*module', block, re.I):
block = 'python module'
name, args, result, bind = _resolvenameargspattern(m.group('after'))
if name is None:
if block == 'block data':
name = '_BLOCK_DATA_'
else:
name = ''
if block not in ['interface', 'block data']:
outmess('analyzeline: No name/args pattern found for line.\n')
previous_context = (block, name, groupcounter)
if args:
args = rmbadname([x.strip()
for x in markoutercomma(args).split('@,@')])
else:
args = []
if '' in args:
while '' in args:
args.remove('')
outmess(
'analyzeline: argument list is malformed (missing argument).\n')
# end of crack line => block,name,args,result
needmodule = 0
needinterface = 0
if case in ['call', 'callfun']:
needinterface = 1
if 'args' not in groupcache[groupcounter]:
return
if name not in groupcache[groupcounter]['args']:
return
for it in grouplist[groupcounter]:
if it['name'] == name:
return
if name in groupcache[groupcounter]['interfaced']:
return
block = {'call': 'subroutine', 'callfun': 'function'}[case]
if f77modulename and neededmodule == -1 and groupcounter <= 1:
neededmodule = groupcounter + 2
needmodule = 1
if block != 'interface':
needinterface = 1
# Create new block(s)
groupcounter = groupcounter + 1
groupcache[groupcounter] = {}
grouplist[groupcounter] = []
if needmodule:
if verbose > 1:
outmess('analyzeline: Creating module block %s\n' %
repr(f77modulename), 0)
groupname[groupcounter] = 'module'
groupcache[groupcounter]['block'] = 'python module'
groupcache[groupcounter]['name'] = f77modulename
groupcache[groupcounter]['from'] = ''
groupcache[groupcounter]['body'] = []
groupcache[groupcounter]['externals'] = []
groupcache[groupcounter]['interfaced'] = []
groupcache[groupcounter]['vars'] = {}
groupcounter = groupcounter + 1
groupcache[groupcounter] = {}
grouplist[groupcounter] = []
if needinterface:
if verbose > 1:
outmess('analyzeline: Creating additional interface block (groupcounter=%s).\n' % (
groupcounter), 0)
groupname[groupcounter] = 'interface'
groupcache[groupcounter]['block'] = 'interface'
groupcache[groupcounter]['name'] = 'unknown_interface'
groupcache[groupcounter]['from'] = '%s:%s' % (
groupcache[groupcounter - 1]['from'], groupcache[groupcounter - 1]['name'])
groupcache[groupcounter]['body'] = []
groupcache[groupcounter]['externals'] = []
groupcache[groupcounter]['interfaced'] = []
groupcache[groupcounter]['vars'] = {}
groupcounter = groupcounter + 1
groupcache[groupcounter] = {}
grouplist[groupcounter] = []
groupname[groupcounter] = block
groupcache[groupcounter]['block'] = block
if not name:
name = 'unknown_' + block
groupcache[groupcounter]['prefix'] = m.group('before')
groupcache[groupcounter]['name'] = rmbadname1(name)
groupcache[groupcounter]['result'] = result
if groupcounter == 1:
groupcache[groupcounter]['from'] = currentfilename
else:
if f77modulename and groupcounter == 3:
groupcache[groupcounter]['from'] = '%s:%s' % (
groupcache[groupcounter - 1]['from'], currentfilename)
else:
groupcache[groupcounter]['from'] = '%s:%s' % (
groupcache[groupcounter - 1]['from'], groupcache[groupcounter - 1]['name'])
for k in list(groupcache[groupcounter].keys()):
if not groupcache[groupcounter][k]:
del groupcache[groupcounter][k]
groupcache[groupcounter]['args'] = args
groupcache[groupcounter]['body'] = []
groupcache[groupcounter]['externals'] = []
groupcache[groupcounter]['interfaced'] = []
groupcache[groupcounter]['vars'] = {}
groupcache[groupcounter]['entry'] = {}
# end of creation
if block == 'type':
groupcache[groupcounter]['varnames'] = []
if case in ['call', 'callfun']: # set parents variables
if name not in groupcache[groupcounter - 2]['externals']:
groupcache[groupcounter - 2]['externals'].append(name)
groupcache[groupcounter]['vars'] = copy.deepcopy(
groupcache[groupcounter - 2]['vars'])
try:
del groupcache[groupcounter]['vars'][name][
groupcache[groupcounter]['vars'][name]['attrspec'].index('external')]
except:
pass
if block in ['function', 'subroutine']: # set global attributes
try:
groupcache[groupcounter]['vars'][name] = appenddecl(
groupcache[groupcounter]['vars'][name], groupcache[groupcounter - 2]['vars'][''])
except:
pass
if case == 'callfun': # return type
if result and result in groupcache[groupcounter]['vars']:
if not name == result:
groupcache[groupcounter]['vars'][name] = appenddecl(
groupcache[groupcounter]['vars'][name], groupcache[groupcounter]['vars'][result])
# if groupcounter>1: # name is interfaced
try:
groupcache[groupcounter - 2]['interfaced'].append(name)
except:
pass
if block == 'function':
t = typespattern[0].match(m.group('before') + ' ' + name)
if t:
typespec, selector, attr, edecl = cracktypespec0(
t.group('this'), t.group('after'))
updatevars(typespec, selector, attr, edecl)
if case in ['call', 'callfun']:
grouplist[groupcounter - 1].append(groupcache[groupcounter])
grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter]
del grouplist[groupcounter]
groupcounter = groupcounter - 1 # end routine
grouplist[groupcounter - 1].append(groupcache[groupcounter])
grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter]
del grouplist[groupcounter]
groupcounter = groupcounter - 1 # end interface
elif case == 'entry':
name, args, result, bind = _resolvenameargspattern(m.group('after'))
if name is not None:
if args:
args = rmbadname([x.strip()
for x in markoutercomma(args).split('@,@')])
else:
args = []
assert result is None, repr(result)
groupcache[groupcounter]['entry'][name] = args
previous_context = ('entry', name, groupcounter)
elif case == 'type':
typespec, selector, attr, edecl = cracktypespec0(
block, m.group('after'))
last_name = updatevars(typespec, selector, attr, edecl)
if last_name is not None:
previous_context = ('variable', last_name, groupcounter)
elif case in ['dimension', 'intent', 'optional', 'required', 'external', 'public', 'private', 'intrisic']:
edecl = groupcache[groupcounter]['vars']
ll = m.group('after').strip()
i = ll.find('::')
if i < 0 and case == 'intent':
i = markouterparen(ll).find('@)@') - 2
ll = ll[:i + 1] + '::' + ll[i + 1:]
i = ll.find('::')
if ll[i:] == '::' and 'args' in groupcache[groupcounter]:
outmess('All arguments will have attribute %s%s\n' %
(m.group('this'), ll[:i]))
ll = ll + ','.join(groupcache[groupcounter]['args'])
if i < 0:
i = 0
pl = ''
else:
pl = ll[:i].strip()
ll = ll[i + 2:]
ch = markoutercomma(pl).split('@,@')
if len(ch) > 1:
pl = ch[0]
outmess('analyzeline: cannot handle multiple attributes without type specification. Ignoring %r.\n' % (
','.join(ch[1:])))
last_name = None
for e in [x.strip() for x in markoutercomma(ll).split('@,@')]:
m1 = namepattern.match(e)
if not m1:
if case in ['public', 'private']:
k = ''
else:
print(m.groupdict())
outmess('analyzeline: no name pattern found in %s statement for %s. Skipping.\n' % (
case, repr(e)))
continue
else:
k = rmbadname1(m1.group('name'))
if k not in edecl:
edecl[k] = {}
if case == 'dimension':
ap = case + m1.group('after')
if case == 'intent':
ap = m.group('this') + pl
if _intentcallbackpattern.match(ap):
if k not in groupcache[groupcounter]['args']:
if groupcounter > 1:
if '__user__' not in groupcache[groupcounter - 2]['name']:
outmess(
'analyzeline: missing __user__ module (could be nothing)\n')
# fixes ticket 1693
if k != groupcache[groupcounter]['name']:
outmess('analyzeline: appending intent(callback) %s'
' to %s arguments\n' % (k, groupcache[groupcounter]['name']))
groupcache[groupcounter]['args'].append(k)
else:
errmess(
'analyzeline: intent(callback) %s is ignored' % (k))
else:
errmess('analyzeline: intent(callback) %s is already'
' in argument list' % (k))
if case in ['optional', 'required', 'public', 'external', 'private', 'intrisic']:
ap = case
if 'attrspec' in edecl[k]:
edecl[k]['attrspec'].append(ap)
else:
edecl[k]['attrspec'] = [ap]
if case == 'external':
if groupcache[groupcounter]['block'] == 'program':
outmess('analyzeline: ignoring program arguments\n')
continue
if k not in groupcache[groupcounter]['args']:
continue
if 'externals' not in groupcache[groupcounter]:
groupcache[groupcounter]['externals'] = []
groupcache[groupcounter]['externals'].append(k)
last_name = k
groupcache[groupcounter]['vars'] = edecl
if last_name is not None:
previous_context = ('variable', last_name, groupcounter)
elif case == 'parameter':
edecl = groupcache[groupcounter]['vars']
ll = m.group('after').strip()[1:-1]
last_name = None
for e in markoutercomma(ll).split('@,@'):
try:
k, initexpr = [x.strip() for x in e.split('=')]
except:
outmess(
'analyzeline: could not extract name,expr in parameter statement "%s" of "%s"\n' % (e, ll))
continue
params = get_parameters(edecl)
k = rmbadname1(k)
if k not in edecl:
edecl[k] = {}
if '=' in edecl[k] and (not edecl[k]['='] == initexpr):
outmess('analyzeline: Overwriting the value of parameter "%s" ("%s") with "%s".\n' % (
k, edecl[k]['='], initexpr))
t = determineexprtype(initexpr, params)
if t:
if t.get('typespec') == 'real':
tt = list(initexpr)
for m in real16pattern.finditer(initexpr):
tt[m.start():m.end()] = list(
initexpr[m.start():m.end()].lower().replace('d', 'e'))
initexpr = ''.join(tt)
elif t.get('typespec') == 'complex':
initexpr = initexpr[1:].lower().replace('d', 'e').\
replace(',', '+1j*(')
try:
v = eval(initexpr, {}, params)
except (SyntaxError, NameError, TypeError) as msg:
errmess('analyzeline: Failed to evaluate %r. Ignoring: %s\n'
% (initexpr, msg))
continue
edecl[k]['='] = repr(v)
if 'attrspec' in edecl[k]:
edecl[k]['attrspec'].append('parameter')
else:
edecl[k]['attrspec'] = ['parameter']
last_name = k
groupcache[groupcounter]['vars'] = edecl
if last_name is not None:
previous_context = ('variable', last_name, groupcounter)
elif case == 'implicit':
if m.group('after').strip().lower() == 'none':
groupcache[groupcounter]['implicit'] = None
elif m.group('after'):
if 'implicit' in groupcache[groupcounter]:
impl = groupcache[groupcounter]['implicit']
else:
impl = {}
if impl is None:
outmess(
'analyzeline: Overwriting earlier "implicit none" statement.\n')
impl = {}
for e in markoutercomma(m.group('after')).split('@,@'):
decl = {}
m1 = re.match(
r'\s*(?P<this>.*?)\s*(\(\s*(?P<after>[a-z-, ]+)\s*\)\s*|)\Z', e, re.I)
if not m1:
outmess(
'analyzeline: could not extract info of implicit statement part "%s"\n' % (e))
continue
m2 = typespattern4implicit.match(m1.group('this'))
if not m2:
outmess(
'analyzeline: could not extract types pattern of implicit statement part "%s"\n' % (e))
continue
typespec, selector, attr, edecl = cracktypespec0(
m2.group('this'), m2.group('after'))
kindselect, charselect, typename = cracktypespec(
typespec, selector)
decl['typespec'] = typespec
decl['kindselector'] = kindselect
decl['charselector'] = charselect
decl['typename'] = typename
for k in list(decl.keys()):
if not decl[k]:
del decl[k]
for r in markoutercomma(m1.group('after')).split('@,@'):
if '-' in r:
try:
begc, endc = [x.strip() for x in r.split('-')]
except:
outmess(
'analyzeline: expected "<char>-<char>" instead of "%s" in range list of implicit statement\n' % r)
continue
else:
begc = endc = r.strip()
if not len(begc) == len(endc) == 1:
outmess(
'analyzeline: expected "<char>-<char>" instead of "%s" in range list of implicit statement (2)\n' % r)
continue
for o in range(ord(begc), ord(endc) + 1):
impl[chr(o)] = decl
groupcache[groupcounter]['implicit'] = impl
elif case == 'data':
ll = []
dl = ''
il = ''
f = 0
fc = 1
inp = 0
for c in m.group('after'):
if not inp:
if c == "'":
fc = not fc
if c == '/' and fc:
f = f + 1
continue
if c == '(':
inp = inp + 1
elif c == ')':
inp = inp - 1
if f == 0:
dl = dl + c
elif f == 1:
il = il + c
elif f == 2:
dl = dl.strip()
if dl.startswith(','):
dl = dl[1:].strip()
ll.append([dl, il])
dl = c
il = ''
f = 0
if f == 2:
dl = dl.strip()
if dl.startswith(','):
dl = dl[1:].strip()
ll.append([dl, il])
vars = {}
if 'vars' in groupcache[groupcounter]:
vars = groupcache[groupcounter]['vars']
last_name = None
for l in ll:
l = [x.strip() for x in l]
if l[0][0] == ',':
l[0] = l[0][1:]
if l[0][0] == '(':
outmess(
'analyzeline: implied-DO list "%s" is not supported. Skipping.\n' % l[0])
continue
i = 0
j = 0
llen = len(l[1])
for v in rmbadname([x.strip() for x in markoutercomma(l[0]).split('@,@')]):
if v[0] == '(':
outmess(
'analyzeline: implied-DO list "%s" is not supported. Skipping.\n' % v)
# XXX: subsequent init expressions may get wrong values.
# Ignoring since data statements are irrelevant for
# wrapping.
continue
fc = 0
while (i < llen) and (fc or not l[1][i] == ','):
if l[1][i] == "'":
fc = not fc
i = i + 1
i = i + 1
if v not in vars:
vars[v] = {}
if '=' in vars[v] and not vars[v]['='] == l[1][j:i - 1]:
outmess('analyzeline: changing init expression of "%s" ("%s") to "%s"\n' % (
v, vars[v]['='], l[1][j:i - 1]))
vars[v]['='] = l[1][j:i - 1]
j = i
last_name = v
groupcache[groupcounter]['vars'] = vars
if last_name is not None:
previous_context = ('variable', last_name, groupcounter)
elif case == 'common':
line = m.group('after').strip()
if not line[0] == '/':
line = '//' + line
cl = []
f = 0
bn = ''
ol = ''
for c in line:
if c == '/':
f = f + 1
continue
if f >= 3:
bn = bn.strip()
if not bn:
bn = '_BLNK_'
cl.append([bn, ol])
f = f - 2
bn = ''
ol = ''
if f % 2:
bn = bn + c
else:
ol = ol + c
bn = bn.strip()
if not bn:
bn = '_BLNK_'
cl.append([bn, ol])
commonkey = {}
if 'common' in groupcache[groupcounter]:
commonkey = groupcache[groupcounter]['common']
for c in cl:
if c[0] not in commonkey:
commonkey[c[0]] = []
for i in [x.strip() for x in markoutercomma(c[1]).split('@,@')]:
if i:
commonkey[c[0]].append(i)
groupcache[groupcounter]['common'] = commonkey
previous_context = ('common', bn, groupcounter)
elif case == 'use':
m1 = re.match(
r'\A\s*(?P<name>\b[\w]+\b)\s*((,(\s*\bonly\b\s*:|(?P<notonly>))\s*(?P<list>.*))|)\s*\Z', m.group('after'), re.I)
if m1:
mm = m1.groupdict()
if 'use' not in groupcache[groupcounter]:
groupcache[groupcounter]['use'] = {}
name = m1.group('name')
groupcache[groupcounter]['use'][name] = {}
isonly = 0
if 'list' in mm and mm['list'] is not None:
if 'notonly' in mm and mm['notonly'] is None:
isonly = 1
groupcache[groupcounter]['use'][name]['only'] = isonly
ll = [x.strip() for x in mm['list'].split(',')]
rl = {}
for l in ll:
if '=' in l:
m2 = re.match(
r'\A\s*(?P<local>\b[\w]+\b)\s*=\s*>\s*(?P<use>\b[\w]+\b)\s*\Z', l, re.I)
if m2:
rl[m2.group('local').strip()] = m2.group(
'use').strip()
else:
outmess(
                                'analyzeline: No local=>use pattern found in %s\n' % repr(l))
else:
rl[l] = l
groupcache[groupcounter]['use'][name]['map'] = rl
else:
pass
else:
print(m.groupdict())
outmess('analyzeline: Could not crack the use statement.\n')
elif case in ['f2pyenhancements']:
if 'f2pyenhancements' not in groupcache[groupcounter]:
groupcache[groupcounter]['f2pyenhancements'] = {}
d = groupcache[groupcounter]['f2pyenhancements']
if m.group('this') == 'usercode' and 'usercode' in d:
if isinstance(d['usercode'], str):
d['usercode'] = [d['usercode']]
d['usercode'].append(m.group('after'))
else:
d[m.group('this')] = m.group('after')
elif case == 'multiline':
if previous_context is None:
if verbose:
outmess('analyzeline: No context for multiline block.\n')
return
gc = groupcounter
appendmultiline(groupcache[gc],
previous_context[:2],
m.group('this'))
else:
if verbose > 1:
print(m.groupdict())
outmess('analyzeline: No code implemented for line.\n')
def appendmultiline(group, context_name, ml):
if 'f2pymultilines' not in group:
group['f2pymultilines'] = {}
d = group['f2pymultilines']
if context_name not in d:
d[context_name] = []
d[context_name].append(ml)
return
def cracktypespec0(typespec, ll):
selector = None
attr = None
if re.match(r'double\s*complex', typespec, re.I):
typespec = 'double complex'
elif re.match(r'double\s*precision', typespec, re.I):
typespec = 'double precision'
else:
typespec = typespec.strip().lower()
m1 = selectpattern.match(markouterparen(ll))
if not m1:
outmess(
'cracktypespec0: no kind/char_selector pattern found for line.\n')
return
d = m1.groupdict()
for k in list(d.keys()):
d[k] = unmarkouterparen(d[k])
if typespec in ['complex', 'integer', 'logical', 'real', 'character', 'type']:
selector = d['this']
ll = d['after']
i = ll.find('::')
if i >= 0:
attr = ll[:i].strip()
ll = ll[i + 2:]
return typespec, selector, attr, ll
#####
namepattern = re.compile(r'\s*(?P<name>\b[\w]+\b)\s*(?P<after>.*)\s*\Z', re.I)
kindselector = re.compile(
r'\s*(\(\s*(kind\s*=)?\s*(?P<kind>.*)\s*\)|[*]\s*(?P<kind2>.*?))\s*\Z', re.I)
charselector = re.compile(
r'\s*(\((?P<lenkind>.*)\)|[*]\s*(?P<charlen>.*))\s*\Z', re.I)
lenkindpattern = re.compile(
r'\s*(kind\s*=\s*(?P<kind>.*?)\s*(@,@\s*len\s*=\s*(?P<len>.*)|)|(len\s*=\s*|)(?P<len2>.*?)\s*(@,@\s*(kind\s*=\s*|)(?P<kind2>.*)|))\s*\Z', re.I)
lenarraypattern = re.compile(
r'\s*(@\(@\s*(?!/)\s*(?P<array>.*?)\s*@\)@\s*[*]\s*(?P<len>.*?)|([*]\s*(?P<len2>.*?)|)\s*(@\(@\s*(?!/)\s*(?P<array2>.*?)\s*@\)@|))\s*(=\s*(?P<init>.*?)|(@\(@|)/\s*(?P<init2>.*?)\s*/(@\)@|)|)\s*\Z', re.I)
def removespaces(expr):
expr = expr.strip()
if len(expr) <= 1:
return expr
expr2 = expr[0]
for i in range(1, len(expr) - 1):
if (expr[i] == ' ' and
((expr[i + 1] in "()[]{}=+-/* ") or
(expr[i - 1] in "()[]{}=+-/* "))):
continue
expr2 = expr2 + expr[i]
expr2 = expr2 + expr[-1]
return expr2
def markinnerspaces(line):
l = ''
f = 0
cc = '\''
cb = ''
for c in line:
if cb == '\\' and c in ['\\', '\'', '"']:
l = l + c
cb = c
continue
if f == 0 and c in ['\'', '"']:
cc = c
        if c == cc:
            # entering or leaving a quoted section: toggle the in-string flag
            f = 1 - f
elif c == ' ' and f == 1:
l = l + '@_@'
continue
l = l + c
cb = c
return l
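# A small illustration of the helper above (example input of my own, not from
# f2py): markinnerspaces("call foo('a b')") should give "call foo('a@_@b')",
# i.e. blanks inside quoted strings are protected with the '@_@' marker before
# the entity list is split on spaces, and restored afterwards in updatevars.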
def updatevars(typespec, selector, attrspec, entitydecl):
global groupcache, groupcounter
last_name = None
kindselect, charselect, typename = cracktypespec(typespec, selector)
if attrspec:
attrspec = [x.strip() for x in markoutercomma(attrspec).split('@,@')]
l = []
c = re.compile(r'(?P<start>[a-zA-Z]+)')
for a in attrspec:
if not a:
continue
m = c.match(a)
if m:
s = m.group('start').lower()
a = s + a[len(s):]
l.append(a)
attrspec = l
el = [x.strip() for x in markoutercomma(entitydecl).split('@,@')]
el1 = []
for e in el:
for e1 in [x.strip() for x in markoutercomma(removespaces(markinnerspaces(e)), comma=' ').split('@ @')]:
if e1:
el1.append(e1.replace('@_@', ' '))
for e in el1:
m = namepattern.match(e)
if not m:
outmess(
'updatevars: no name pattern found for entity=%s. Skipping.\n' % (repr(e)))
continue
ename = rmbadname1(m.group('name'))
edecl = {}
if ename in groupcache[groupcounter]['vars']:
edecl = groupcache[groupcounter]['vars'][ename].copy()
not_has_typespec = 'typespec' not in edecl
if not_has_typespec:
edecl['typespec'] = typespec
elif typespec and (not typespec == edecl['typespec']):
outmess('updatevars: attempt to change the type of "%s" ("%s") to "%s". Ignoring.\n' % (
ename, edecl['typespec'], typespec))
if 'kindselector' not in edecl:
edecl['kindselector'] = copy.copy(kindselect)
elif kindselect:
for k in list(kindselect.keys()):
if k in edecl['kindselector'] and (not kindselect[k] == edecl['kindselector'][k]):
outmess('updatevars: attempt to change the kindselector "%s" of "%s" ("%s") to "%s". Ignoring.\n' % (
k, ename, edecl['kindselector'][k], kindselect[k]))
else:
edecl['kindselector'][k] = copy.copy(kindselect[k])
if 'charselector' not in edecl and charselect:
if not_has_typespec:
edecl['charselector'] = charselect
else:
errmess('updatevars:%s: attempt to change empty charselector to %r. Ignoring.\n'
% (ename, charselect))
elif charselect:
for k in list(charselect.keys()):
if k in edecl['charselector'] and (not charselect[k] == edecl['charselector'][k]):
outmess('updatevars: attempt to change the charselector "%s" of "%s" ("%s") to "%s". Ignoring.\n' % (
k, ename, edecl['charselector'][k], charselect[k]))
else:
edecl['charselector'][k] = copy.copy(charselect[k])
if 'typename' not in edecl:
edecl['typename'] = typename
elif typename and (not edecl['typename'] == typename):
outmess('updatevars: attempt to change the typename of "%s" ("%s") to "%s". Ignoring.\n' % (
ename, edecl['typename'], typename))
if 'attrspec' not in edecl:
edecl['attrspec'] = copy.copy(attrspec)
elif attrspec:
for a in attrspec:
if a not in edecl['attrspec']:
edecl['attrspec'].append(a)
else:
edecl['typespec'] = copy.copy(typespec)
edecl['kindselector'] = copy.copy(kindselect)
edecl['charselector'] = copy.copy(charselect)
edecl['typename'] = typename
edecl['attrspec'] = copy.copy(attrspec)
if m.group('after'):
m1 = lenarraypattern.match(markouterparen(m.group('after')))
if m1:
d1 = m1.groupdict()
for lk in ['len', 'array', 'init']:
if d1[lk + '2'] is not None:
d1[lk] = d1[lk + '2']
del d1[lk + '2']
for k in list(d1.keys()):
if d1[k] is not None:
d1[k] = unmarkouterparen(d1[k])
else:
del d1[k]
if 'len' in d1 and 'array' in d1:
if d1['len'] == '':
d1['len'] = d1['array']
del d1['array']
else:
d1['array'] = d1['array'] + ',' + d1['len']
del d1['len']
errmess('updatevars: "%s %s" is mapped to "%s %s(%s)"\n' % (
typespec, e, typespec, ename, d1['array']))
if 'array' in d1:
dm = 'dimension(%s)' % d1['array']
if 'attrspec' not in edecl or (not edecl['attrspec']):
edecl['attrspec'] = [dm]
else:
edecl['attrspec'].append(dm)
for dm1 in edecl['attrspec']:
if dm1[:9] == 'dimension' and dm1 != dm:
del edecl['attrspec'][-1]
errmess('updatevars:%s: attempt to change %r to %r. Ignoring.\n'
% (ename, dm1, dm))
break
if 'len' in d1:
if typespec in ['complex', 'integer', 'logical', 'real']:
if ('kindselector' not in edecl) or (not edecl['kindselector']):
edecl['kindselector'] = {}
edecl['kindselector']['*'] = d1['len']
elif typespec == 'character':
if ('charselector' not in edecl) or (not edecl['charselector']):
edecl['charselector'] = {}
if 'len' in edecl['charselector']:
del edecl['charselector']['len']
edecl['charselector']['*'] = d1['len']
if 'init' in d1:
if '=' in edecl and (not edecl['='] == d1['init']):
outmess('updatevars: attempt to change the init expression of "%s" ("%s") to "%s". Ignoring.\n' % (
ename, edecl['='], d1['init']))
else:
edecl['='] = d1['init']
else:
outmess('updatevars: could not crack entity declaration "%s". Ignoring.\n' % (
ename + m.group('after')))
for k in list(edecl.keys()):
if not edecl[k]:
del edecl[k]
groupcache[groupcounter]['vars'][ename] = edecl
if 'varnames' in groupcache[groupcounter]:
groupcache[groupcounter]['varnames'].append(ename)
last_name = ename
return last_name
def cracktypespec(typespec, selector):
kindselect = None
charselect = None
typename = None
if selector:
if typespec in ['complex', 'integer', 'logical', 'real']:
kindselect = kindselector.match(selector)
if not kindselect:
outmess(
'cracktypespec: no kindselector pattern found for %s\n' % (repr(selector)))
return
kindselect = kindselect.groupdict()
kindselect['*'] = kindselect['kind2']
del kindselect['kind2']
for k in list(kindselect.keys()):
if not kindselect[k]:
del kindselect[k]
for k, i in list(kindselect.items()):
kindselect[k] = rmbadname1(i)
elif typespec == 'character':
charselect = charselector.match(selector)
if not charselect:
outmess(
'cracktypespec: no charselector pattern found for %s\n' % (repr(selector)))
return
charselect = charselect.groupdict()
charselect['*'] = charselect['charlen']
del charselect['charlen']
if charselect['lenkind']:
lenkind = lenkindpattern.match(
markoutercomma(charselect['lenkind']))
lenkind = lenkind.groupdict()
for lk in ['len', 'kind']:
if lenkind[lk + '2']:
lenkind[lk] = lenkind[lk + '2']
charselect[lk] = lenkind[lk]
del lenkind[lk + '2']
del charselect['lenkind']
for k in list(charselect.keys()):
if not charselect[k]:
del charselect[k]
for k, i in list(charselect.items()):
charselect[k] = rmbadname1(i)
elif typespec == 'type':
typename = re.match(r'\s*\(\s*(?P<name>\w+)\s*\)', selector, re.I)
if typename:
typename = typename.group('name')
else:
outmess('cracktypespec: no typename found in %s\n' %
(repr(typespec + selector)))
else:
outmess('cracktypespec: no selector used for %s\n' %
(repr(selector)))
return kindselect, charselect, typename
######
def setattrspec(decl, attr, force=0):
if not decl:
decl = {}
if not attr:
return decl
if 'attrspec' not in decl:
decl['attrspec'] = [attr]
return decl
if force:
decl['attrspec'].append(attr)
if attr in decl['attrspec']:
return decl
if attr == 'static' and 'automatic' not in decl['attrspec']:
decl['attrspec'].append(attr)
elif attr == 'automatic' and 'static' not in decl['attrspec']:
decl['attrspec'].append(attr)
elif attr == 'public' and 'private' not in decl['attrspec']:
decl['attrspec'].append(attr)
elif attr == 'private' and 'public' not in decl['attrspec']:
decl['attrspec'].append(attr)
else:
decl['attrspec'].append(attr)
return decl
def setkindselector(decl, sel, force=0):
if not decl:
decl = {}
if not sel:
return decl
if 'kindselector' not in decl:
decl['kindselector'] = sel
return decl
for k in list(sel.keys()):
if force or k not in decl['kindselector']:
decl['kindselector'][k] = sel[k]
return decl
def setcharselector(decl, sel, force=0):
if not decl:
decl = {}
if not sel:
return decl
if 'charselector' not in decl:
decl['charselector'] = sel
return decl
for k in list(sel.keys()):
if force or k not in decl['charselector']:
decl['charselector'][k] = sel[k]
return decl
def getblockname(block, unknown='unknown'):
if 'name' in block:
return block['name']
return unknown
# post processing
def setmesstext(block):
global filepositiontext
try:
filepositiontext = 'In: %s:%s\n' % (block['from'], block['name'])
except:
pass
def get_usedict(block):
usedict = {}
if 'parent_block' in block:
usedict = get_usedict(block['parent_block'])
if 'use' in block:
usedict.update(block['use'])
return usedict
def get_useparameters(block, param_map=None):
global f90modulevars
if param_map is None:
param_map = {}
usedict = get_usedict(block)
if not usedict:
return param_map
for usename, mapping in list(usedict.items()):
usename = usename.lower()
if usename not in f90modulevars:
outmess('get_useparameters: no module %s info used by %s\n' %
(usename, block.get('name')))
continue
mvars = f90modulevars[usename]
params = get_parameters(mvars)
if not params:
continue
# XXX: apply mapping
if mapping:
errmess('get_useparameters: mapping for %s not impl.' % (mapping))
for k, v in list(params.items()):
if k in param_map:
outmess('get_useparameters: overriding parameter %s with'
' value from module %s' % (repr(k), repr(usename)))
param_map[k] = v
return param_map
def postcrack2(block, tab='', param_map=None):
global f90modulevars
if not f90modulevars:
return block
if isinstance(block, list):
ret = []
for g in block:
g = postcrack2(g, tab=tab + '\t', param_map=param_map)
ret.append(g)
return ret
setmesstext(block)
outmess('%sBlock: %s\n' % (tab, block['name']), 0)
if param_map is None:
param_map = get_useparameters(block)
if param_map is not None and 'vars' in block:
vars = block['vars']
for n in list(vars.keys()):
var = vars[n]
if 'kindselector' in var:
kind = var['kindselector']
if 'kind' in kind:
val = kind['kind']
if val in param_map:
kind['kind'] = param_map[val]
new_body = []
for b in block['body']:
b = postcrack2(b, tab=tab + '\t', param_map=param_map)
new_body.append(b)
block['body'] = new_body
return block
def postcrack(block, args=None, tab=''):
"""
TODO:
function return values
determine expression types if in argument list
"""
global usermodules, onlyfunctions
if isinstance(block, list):
gret = []
uret = []
for g in block:
setmesstext(g)
g = postcrack(g, tab=tab + '\t')
# sort user routines to appear first
if 'name' in g and '__user__' in g['name']:
uret.append(g)
else:
gret.append(g)
return uret + gret
setmesstext(block)
if not isinstance(block, dict) and 'block' not in block:
raise Exception('postcrack: Expected block dictionary instead of ' +
str(block))
if 'name' in block and not block['name'] == 'unknown_interface':
outmess('%sBlock: %s\n' % (tab, block['name']), 0)
block = analyzeargs(block)
block = analyzecommon(block)
block['vars'] = analyzevars(block)
block['sortvars'] = sortvarnames(block['vars'])
if 'args' in block and block['args']:
args = block['args']
block['body'] = analyzebody(block, args, tab=tab)
userisdefined = []
if 'use' in block:
useblock = block['use']
for k in list(useblock.keys()):
if '__user__' in k:
userisdefined.append(k)
else:
useblock = {}
name = ''
if 'name' in block:
name = block['name']
# and not userisdefined: # Build a __user__ module
if 'externals' in block and block['externals']:
interfaced = []
if 'interfaced' in block:
interfaced = block['interfaced']
mvars = copy.copy(block['vars'])
if name:
mname = name + '__user__routines'
else:
mname = 'unknown__user__routines'
if mname in userisdefined:
i = 1
while '%s_%i' % (mname, i) in userisdefined:
i = i + 1
mname = '%s_%i' % (mname, i)
interface = {'block': 'interface', 'body': [],
'vars': {}, 'name': name + '_user_interface'}
for e in block['externals']:
if e in interfaced:
edef = []
j = -1
for b in block['body']:
j = j + 1
if b['block'] == 'interface':
i = -1
for bb in b['body']:
i = i + 1
if 'name' in bb and bb['name'] == e:
edef = copy.copy(bb)
del b['body'][i]
break
if edef:
if not b['body']:
del block['body'][j]
del interfaced[interfaced.index(e)]
break
interface['body'].append(edef)
else:
if e in mvars and not isexternal(mvars[e]):
interface['vars'][e] = mvars[e]
if interface['vars'] or interface['body']:
block['interfaced'] = interfaced
mblock = {'block': 'python module', 'body': [
interface], 'vars': {}, 'name': mname, 'interfaced': block['externals']}
useblock[mname] = {}
usermodules.append(mblock)
if useblock:
block['use'] = useblock
return block
def sortvarnames(vars):
indep = []
dep = []
for v in list(vars.keys()):
if 'depend' in vars[v] and vars[v]['depend']:
dep.append(v)
else:
indep.append(v)
n = len(dep)
i = 0
while dep: # XXX: How to catch dependence cycles correctly?
v = dep[0]
fl = 0
for w in dep[1:]:
if w in vars[v]['depend']:
fl = 1
break
if fl:
dep = dep[1:] + [v]
i = i + 1
if i > n:
errmess('sortvarnames: failed to compute dependencies because'
' of cyclic dependencies between '
+ ', '.join(dep) + '\n')
indep = indep + dep
break
else:
indep.append(v)
dep = dep[1:]
n = len(dep)
i = 0
return indep
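# Rough illustration of the ordering above (hypothetical variables):
# sortvarnames({'n': {}, 'a': {'depend': ['n']}}) should return ['n', 'a'],
# since 'a' depends on 'n' and therefore has to come after it.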
def analyzecommon(block):
if not hascommon(block):
return block
commonvars = []
for k in list(block['common'].keys()):
comvars = []
for e in block['common'][k]:
m = re.match(
r'\A\s*\b(?P<name>.*?)\b\s*(\((?P<dims>.*?)\)|)\s*\Z', e, re.I)
if m:
dims = []
if m.group('dims'):
dims = [x.strip()
for x in markoutercomma(m.group('dims')).split('@,@')]
n = m.group('name').strip()
if n in block['vars']:
if 'attrspec' in block['vars'][n]:
block['vars'][n]['attrspec'].append(
'dimension(%s)' % (','.join(dims)))
else:
block['vars'][n]['attrspec'] = [
'dimension(%s)' % (','.join(dims))]
else:
if dims:
block['vars'][n] = {
'attrspec': ['dimension(%s)' % (','.join(dims))]}
else:
block['vars'][n] = {}
if n not in commonvars:
commonvars.append(n)
else:
n = e
errmess(
'analyzecommon: failed to extract "<name>[(<dims>)]" from "%s" in common /%s/.\n' % (e, k))
comvars.append(n)
block['common'][k] = comvars
if 'commonvars' not in block:
block['commonvars'] = commonvars
else:
block['commonvars'] = block['commonvars'] + commonvars
return block
def analyzebody(block, args, tab=''):
global usermodules, skipfuncs, onlyfuncs, f90modulevars
setmesstext(block)
body = []
for b in block['body']:
b['parent_block'] = block
if b['block'] in ['function', 'subroutine']:
if args is not None and b['name'] not in args:
continue
else:
as_ = b['args']
if b['name'] in skipfuncs:
continue
if onlyfuncs and b['name'] not in onlyfuncs:
continue
b['saved_interface'] = crack2fortrangen(
b, '\n' + ' ' * 6, as_interface=True)
else:
as_ = args
b = postcrack(b, as_, tab=tab + '\t')
if b['block'] == 'interface' and not b['body']:
if 'f2pyenhancements' not in b:
continue
if b['block'].replace(' ', '') == 'pythonmodule':
usermodules.append(b)
else:
if b['block'] == 'module':
f90modulevars[b['name']] = b['vars']
body.append(b)
return body
def buildimplicitrules(block):
setmesstext(block)
implicitrules = defaultimplicitrules
attrrules = {}
if 'implicit' in block:
if block['implicit'] is None:
implicitrules = None
if verbose > 1:
outmess(
'buildimplicitrules: no implicit rules for routine %s.\n' % repr(block['name']))
else:
for k in list(block['implicit'].keys()):
if block['implicit'][k].get('typespec') not in ['static', 'automatic']:
implicitrules[k] = block['implicit'][k]
else:
attrrules[k] = block['implicit'][k]['typespec']
return implicitrules, attrrules
def myeval(e, g=None, l=None):
r = eval(e, g, l)
if type(r) in [type(0), type(0.0)]:
return r
raise ValueError('r=%r' % (r))
getlincoef_re_1 = re.compile(r'\A\b\w+\b\Z', re.I)
def getlincoef(e, xset): # e = a*x+b ; x in xset
try:
c = int(myeval(e, {}, {}))
return 0, c, None
except:
pass
if getlincoef_re_1.match(e):
return 1, 0, e
len_e = len(e)
for x in xset:
if len(x) > len_e:
continue
if re.search(r'\w\s*\([^)]*\b' + x + r'\b', e):
            # skip function calls having x as an argument, e.g. max(1, x)
continue
re_1 = re.compile(r'(?P<before>.*?)\b' + x + r'\b(?P<after>.*)', re.I)
m = re_1.match(e)
if m:
try:
m1 = re_1.match(e)
while m1:
ee = '%s(%s)%s' % (
m1.group('before'), 0, m1.group('after'))
m1 = re_1.match(ee)
b = myeval(ee, {}, {})
m1 = re_1.match(e)
while m1:
ee = '%s(%s)%s' % (
m1.group('before'), 1, m1.group('after'))
m1 = re_1.match(ee)
a = myeval(ee, {}, {}) - b
m1 = re_1.match(e)
while m1:
ee = '%s(%s)%s' % (
m1.group('before'), 0.5, m1.group('after'))
m1 = re_1.match(ee)
c = myeval(ee, {}, {})
# computing another point to be sure that expression is linear
m1 = re_1.match(e)
while m1:
ee = '%s(%s)%s' % (
m1.group('before'), 1.5, m1.group('after'))
m1 = re_1.match(ee)
c2 = myeval(ee, {}, {})
if (a * 0.5 + b == c and a * 1.5 + b == c2):
return a, b, x
except:
pass
break
return None, None, None
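# Worked example for getlincoef (illustrative values only): for e = '2*n+1'
# and xset = ['n'], the probes at n = 0, 1, 0.5 and 1.5 evaluate to 1, 3, 2.0
# and 4.0, both linearity checks pass, and the call should return (2, 1, 'n'),
# i.e. e == 2*n + 1.  A purely numeric string such as '3' gives (0, 3, None).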
_varname_match = re.compile(r'\A[a-z]\w*\Z').match
def getarrlen(dl, args, star='*'):
edl = []
try:
edl.append(myeval(dl[0], {}, {}))
except:
edl.append(dl[0])
try:
edl.append(myeval(dl[1], {}, {}))
except:
edl.append(dl[1])
if isinstance(edl[0], int):
p1 = 1 - edl[0]
if p1 == 0:
d = str(dl[1])
elif p1 < 0:
d = '%s-%s' % (dl[1], -p1)
else:
d = '%s+%s' % (dl[1], p1)
elif isinstance(edl[1], int):
p1 = 1 + edl[1]
if p1 == 0:
d = '-(%s)' % (dl[0])
else:
d = '%s-(%s)' % (p1, dl[0])
else:
d = '%s-(%s)+1' % (dl[1], dl[0])
try:
return repr(myeval(d, {}, {})), None, None
except:
pass
d1, d2 = getlincoef(dl[0], args), getlincoef(dl[1], args)
if None not in [d1[0], d2[0]]:
if (d1[0], d2[0]) == (0, 0):
return repr(d2[1] - d1[1] + 1), None, None
b = d2[1] - d1[1] + 1
d1 = (d1[0], 0, d1[2])
d2 = (d2[0], b, d2[2])
if d1[0] == 0 and d2[2] in args:
if b < 0:
return '%s * %s - %s' % (d2[0], d2[2], -b), d2[2], '+%s)/(%s)' % (-b, d2[0])
elif b:
return '%s * %s + %s' % (d2[0], d2[2], b), d2[2], '-%s)/(%s)' % (b, d2[0])
else:
return '%s * %s' % (d2[0], d2[2]), d2[2], ')/(%s)' % (d2[0])
if d2[0] == 0 and d1[2] in args:
if b < 0:
return '%s * %s - %s' % (-d1[0], d1[2], -b), d1[2], '+%s)/(%s)' % (-b, -d1[0])
elif b:
return '%s * %s + %s' % (-d1[0], d1[2], b), d1[2], '-%s)/(%s)' % (b, -d1[0])
else:
return '%s * %s' % (-d1[0], d1[2]), d1[2], ')/(%s)' % (-d1[0])
if d1[2] == d2[2] and d1[2] in args:
a = d2[0] - d1[0]
if not a:
return repr(b), None, None
if b < 0:
return '%s * %s - %s' % (a, d1[2], -b), d2[2], '+%s)/(%s)' % (-b, a)
elif b:
return '%s * %s + %s' % (a, d1[2], b), d2[2], '-%s)/(%s)' % (b, a)
else:
return '%s * %s' % (a, d1[2]), d2[2], ')/(%s)' % (a)
if d1[0] == d2[0] == 1:
c = str(d1[2])
if c not in args:
if _varname_match(c):
outmess('\tgetarrlen:variable "%s" undefined\n' % (c))
c = '(%s)' % c
if b == 0:
d = '%s-%s' % (d2[2], c)
elif b < 0:
d = '%s-%s-%s' % (d2[2], c, -b)
else:
d = '%s-%s+%s' % (d2[2], c, b)
elif d1[0] == 0:
c2 = str(d2[2])
if c2 not in args:
if _varname_match(c2):
outmess('\tgetarrlen:variable "%s" undefined\n' % (c2))
c2 = '(%s)' % c2
if d2[0] == 1:
pass
elif d2[0] == -1:
c2 = '-%s' % c2
else:
c2 = '%s*%s' % (d2[0], c2)
if b == 0:
d = c2
elif b < 0:
d = '%s-%s' % (c2, -b)
else:
d = '%s+%s' % (c2, b)
elif d2[0] == 0:
c1 = str(d1[2])
if c1 not in args:
if _varname_match(c1):
outmess('\tgetarrlen:variable "%s" undefined\n' % (c1))
c1 = '(%s)' % c1
if d1[0] == 1:
c1 = '-%s' % c1
elif d1[0] == -1:
c1 = '+%s' % c1
elif d1[0] < 0:
c1 = '+%s*%s' % (-d1[0], c1)
else:
c1 = '-%s*%s' % (d1[0], c1)
if b == 0:
d = c1
elif b < 0:
d = '%s-%s' % (c1, -b)
else:
d = '%s+%s' % (c1, b)
else:
c1 = str(d1[2])
if c1 not in args:
if _varname_match(c1):
outmess('\tgetarrlen:variable "%s" undefined\n' % (c1))
c1 = '(%s)' % c1
if d1[0] == 1:
c1 = '-%s' % c1
elif d1[0] == -1:
c1 = '+%s' % c1
elif d1[0] < 0:
c1 = '+%s*%s' % (-d1[0], c1)
else:
c1 = '-%s*%s' % (d1[0], c1)
c2 = str(d2[2])
if c2 not in args:
if _varname_match(c2):
outmess('\tgetarrlen:variable "%s" undefined\n' % (c2))
c2 = '(%s)' % c2
if d2[0] == 1:
pass
elif d2[0] == -1:
c2 = '-%s' % c2
else:
c2 = '%s*%s' % (d2[0], c2)
if b == 0:
d = '%s%s' % (c2, c1)
elif b < 0:
d = '%s%s-%s' % (c2, c1, -b)
else:
d = '%s%s+%s' % (c2, c1, b)
return d, None, None
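# Illustration with made-up dimension bounds: getarrlen(['1', '10'], [])
# should return ('10', None, None), while getarrlen(['1', 'n'], ['n']) should
# return ('1 * n', 'n', ')/(1)'); the caller in analyzevars later strips the
# leading '1 * ' and the trailing '/(1)'.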
word_pattern = re.compile(r'\b[a-z][\w$]*\b', re.I)
def _get_depend_dict(name, vars, deps):
if name in vars:
words = vars[name].get('depend', [])
if '=' in vars[name] and not isstring(vars[name]):
for word in word_pattern.findall(vars[name]['=']):
if word not in words and word in vars:
words.append(word)
for word in words[:]:
for w in deps.get(word, []) \
or _get_depend_dict(word, vars, deps):
if w not in words:
words.append(w)
else:
outmess('_get_depend_dict: no dependence info for %s\n' % (repr(name)))
words = []
deps[name] = words
return words
def _calc_depend_dict(vars):
names = list(vars.keys())
depend_dict = {}
for n in names:
_get_depend_dict(n, vars, depend_dict)
return depend_dict
def get_sorted_names(vars):
"""
"""
depend_dict = _calc_depend_dict(vars)
names = []
for name in list(depend_dict.keys()):
if not depend_dict[name]:
names.append(name)
del depend_dict[name]
while depend_dict:
for name, lst in list(depend_dict.items()):
new_lst = [n for n in lst if n in depend_dict]
if not new_lst:
names.append(name)
del depend_dict[name]
else:
depend_dict[name] = new_lst
return [name for name in names if name in vars]
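# Example of the dependency-aware ordering (made-up variables): with
# vars = {'m': {'=': 'n+1'}, 'n': {}} the init expression of 'm' references
# 'n', so get_sorted_names(vars) should return ['n', 'm'].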
def _kind_func(string):
# XXX: return something sensible.
if string[0] in "'\"":
string = string[1:-1]
if real16pattern.match(string):
return 8
elif real8pattern.match(string):
return 4
return 'kind(' + string + ')'
def _selected_int_kind_func(r):
# XXX: This should be processor dependent
m = 10 ** r
if m <= 2 ** 8:
return 1
if m <= 2 ** 16:
return 2
if m <= 2 ** 32:
return 4
if m <= 2 ** 63:
return 8
if m <= 2 ** 128:
return 16
return -1
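# For instance, under the mapping above _selected_int_kind_func(4) -> 2
# because 10**4 <= 2**16, and _selected_int_kind_func(10) -> 8 because
# 10**10 <= 2**63; the exact kind numbers are an assumption about a typical
# processor, as the XXX note says.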
def _selected_real_kind_func(p, r=0, radix=0):
# XXX: This should be processor dependent
# This is only good for 0 <= p <= 20
if p < 7:
return 4
if p < 16:
return 8
if platform.machine().lower().startswith('power'):
if p <= 20:
return 16
else:
if p < 19:
return 10
elif p <= 20:
return 16
return -1
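# Likewise _selected_real_kind_func(6) -> 4 and _selected_real_kind_func(15)
# -> 8; for p = 18 the result depends on the host: 16 on POWER machines
# (p <= 20) and 10 elsewhere, per the branches above.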
def get_parameters(vars, global_params={}):
params = copy.copy(global_params)
g_params = copy.copy(global_params)
for name, func in [('kind', _kind_func),
('selected_int_kind', _selected_int_kind_func),
('selected_real_kind', _selected_real_kind_func), ]:
if name not in g_params:
g_params[name] = func
param_names = []
for n in get_sorted_names(vars):
if 'attrspec' in vars[n] and 'parameter' in vars[n]['attrspec']:
param_names.append(n)
kind_re = re.compile(r'\bkind\s*\(\s*(?P<value>.*)\s*\)', re.I)
selected_int_kind_re = re.compile(
r'\bselected_int_kind\s*\(\s*(?P<value>.*)\s*\)', re.I)
selected_kind_re = re.compile(
r'\bselected_(int|real)_kind\s*\(\s*(?P<value>.*)\s*\)', re.I)
for n in param_names:
if '=' in vars[n]:
v = vars[n]['=']
if islogical(vars[n]):
v = v.lower()
for repl in [
('.false.', 'False'),
('.true.', 'True'),
# TODO: test .eq., .neq., etc replacements.
]:
v = v.replace(*repl)
v = kind_re.sub(r'kind("\1")', v)
v = selected_int_kind_re.sub(r'selected_int_kind(\1)', v)
if isinteger(vars[n]) and not selected_kind_re.match(v):
v = v.split('_')[0]
if isdouble(vars[n]):
tt = list(v)
for m in real16pattern.finditer(v):
tt[m.start():m.end()] = list(
v[m.start():m.end()].lower().replace('d', 'e'))
v = ''.join(tt)
if iscomplex(vars[n]):
if v[0] == '(' and v[-1] == ')':
# FIXME, unused l looks like potential bug
l = markoutercomma(v[1:-1]).split('@,@')
try:
params[n] = eval(v, g_params, params)
except Exception as msg:
params[n] = v
outmess('get_parameters: got "%s" on %s\n' % (msg, repr(v)))
if isstring(vars[n]) and isinstance(params[n], int):
params[n] = chr(params[n])
nl = n.lower()
if nl != n:
params[nl] = params[n]
else:
print(vars[n])
outmess(
'get_parameters:parameter %s does not have value?!\n' % (repr(n)))
return params
def _eval_length(length, params):
if length in ['(:)', '(*)', '*']:
return '(*)'
return _eval_scalar(length, params)
_is_kind_number = re.compile(r'\d+_').match
def _eval_scalar(value, params):
if _is_kind_number(value):
value = value.split('_')[0]
try:
value = str(eval(value, {}, params))
except (NameError, SyntaxError):
return value
except Exception as msg:
errmess('"%s" in evaluating %r '
'(available names: %s)\n'
% (msg, value, list(params.keys())))
return value
def analyzevars(block):
global f90modulevars
setmesstext(block)
implicitrules, attrrules = buildimplicitrules(block)
vars = copy.copy(block['vars'])
if block['block'] == 'function' and block['name'] not in vars:
vars[block['name']] = {}
if '' in block['vars']:
del vars['']
if 'attrspec' in block['vars']['']:
gen = block['vars']['']['attrspec']
for n in list(vars.keys()):
for k in ['public', 'private']:
if k in gen:
vars[n] = setattrspec(vars[n], k)
svars = []
args = block['args']
for a in args:
try:
vars[a]
svars.append(a)
except KeyError:
pass
for n in list(vars.keys()):
if n not in args:
svars.append(n)
params = get_parameters(vars, get_useparameters(block))
dep_matches = {}
name_match = re.compile(r'\w[\w\d_$]*').match
for v in list(vars.keys()):
m = name_match(v)
if m:
n = v[m.start():m.end()]
try:
dep_matches[n]
except KeyError:
dep_matches[n] = re.compile(r'.*\b%s\b' % (v), re.I).match
for n in svars:
if n[0] in list(attrrules.keys()):
vars[n] = setattrspec(vars[n], attrrules[n[0]])
if 'typespec' not in vars[n]:
if not('attrspec' in vars[n] and 'external' in vars[n]['attrspec']):
if implicitrules:
ln0 = n[0].lower()
for k in list(implicitrules[ln0].keys()):
if k == 'typespec' and implicitrules[ln0][k] == 'undefined':
continue
if k not in vars[n]:
vars[n][k] = implicitrules[ln0][k]
elif k == 'attrspec':
for l in implicitrules[ln0][k]:
vars[n] = setattrspec(vars[n], l)
elif n in block['args']:
outmess('analyzevars: typespec of variable %s is not defined in routine %s.\n' % (
repr(n), block['name']))
if 'charselector' in vars[n]:
if 'len' in vars[n]['charselector']:
l = vars[n]['charselector']['len']
try:
l = str(eval(l, {}, params))
except:
pass
vars[n]['charselector']['len'] = l
if 'kindselector' in vars[n]:
if 'kind' in vars[n]['kindselector']:
l = vars[n]['kindselector']['kind']
try:
l = str(eval(l, {}, params))
except:
pass
vars[n]['kindselector']['kind'] = l
savelindims = {}
if 'attrspec' in vars[n]:
attr = vars[n]['attrspec']
attr.reverse()
vars[n]['attrspec'] = []
dim, intent, depend, check, note = None, None, None, None, None
for a in attr:
if a[:9] == 'dimension':
dim = (a[9:].strip())[1:-1]
elif a[:6] == 'intent':
intent = (a[6:].strip())[1:-1]
elif a[:6] == 'depend':
depend = (a[6:].strip())[1:-1]
elif a[:5] == 'check':
check = (a[5:].strip())[1:-1]
elif a[:4] == 'note':
note = (a[4:].strip())[1:-1]
else:
vars[n] = setattrspec(vars[n], a)
if intent:
if 'intent' not in vars[n]:
vars[n]['intent'] = []
for c in [x.strip() for x in markoutercomma(intent).split('@,@')]:
# Remove spaces so that 'in out' becomes 'inout'
tmp = c.replace(' ', '')
if tmp not in vars[n]['intent']:
vars[n]['intent'].append(tmp)
intent = None
if note:
note = note.replace('\\n\\n', '\n\n')
note = note.replace('\\n ', '\n')
if 'note' not in vars[n]:
vars[n]['note'] = [note]
else:
vars[n]['note'].append(note)
note = None
if depend is not None:
if 'depend' not in vars[n]:
vars[n]['depend'] = []
for c in rmbadname([x.strip() for x in markoutercomma(depend).split('@,@')]):
if c not in vars[n]['depend']:
vars[n]['depend'].append(c)
depend = None
if check is not None:
if 'check' not in vars[n]:
vars[n]['check'] = []
for c in [x.strip() for x in markoutercomma(check).split('@,@')]:
if c not in vars[n]['check']:
vars[n]['check'].append(c)
check = None
if dim and 'dimension' not in vars[n]:
vars[n]['dimension'] = []
for d in rmbadname([x.strip() for x in markoutercomma(dim).split('@,@')]):
star = '*'
if d == ':':
star = ':'
if d in params:
d = str(params[d])
for p in list(params.keys()):
re_1 = re.compile(r'(?P<before>.*?)\b' + p + r'\b(?P<after>.*)', re.I)
m = re_1.match(d)
while m:
d = m.group('before') + \
str(params[p]) + m.group('after')
m = re_1.match(d)
if d == star:
dl = [star]
else:
dl = markoutercomma(d, ':').split('@:@')
if len(dl) == 2 and '*' in dl: # e.g. dimension(5:*)
dl = ['*']
d = '*'
if len(dl) == 1 and not dl[0] == star:
dl = ['1', dl[0]]
if len(dl) == 2:
d, v, di = getarrlen(dl, list(block['vars'].keys()))
if d[:4] == '1 * ':
d = d[4:]
if di and di[-4:] == '/(1)':
di = di[:-4]
if v:
savelindims[d] = v, di
vars[n]['dimension'].append(d)
if 'dimension' in vars[n]:
if isintent_c(vars[n]):
shape_macro = 'shape'
else:
shape_macro = 'shape' # 'fshape'
if isstringarray(vars[n]):
if 'charselector' in vars[n]:
d = vars[n]['charselector']
if '*' in d:
d = d['*']
errmess('analyzevars: character array "character*%s %s(%s)" is considered as "character %s(%s)"; "intent(c)" is forced.\n'
% (d, n,
','.join(vars[n]['dimension']),
n, ','.join(vars[n]['dimension'] + [d])))
vars[n]['dimension'].append(d)
del vars[n]['charselector']
if 'intent' not in vars[n]:
vars[n]['intent'] = []
if 'c' not in vars[n]['intent']:
vars[n]['intent'].append('c')
else:
errmess(
"analyzevars: charselector=%r unhandled." % (d))
if 'check' not in vars[n] and 'args' in block and n in block['args']:
flag = 'depend' not in vars[n]
if flag:
vars[n]['depend'] = []
vars[n]['check'] = []
if 'dimension' in vars[n]:
#/----< no check
i = -1
ni = len(vars[n]['dimension'])
for d in vars[n]['dimension']:
                    ddeps = [] # dependencies of 'd'
ad = ''
pd = ''
if d not in vars:
if d in savelindims:
pd, ad = '(', savelindims[d][1]
d = savelindims[d][0]
else:
for r in block['args']:
if r not in vars:
continue
if re.match(r'.*?\b' + r + r'\b', d, re.I):
ddeps.append(r)
if d in vars:
if 'attrspec' in vars[d]:
for aa in vars[d]['attrspec']:
if aa[:6] == 'depend':
ddeps += aa[6:].strip()[1:-1].split(',')
if 'depend' in vars[d]:
ddeps = ddeps + vars[d]['depend']
i = i + 1
if d in vars and ('depend' not in vars[d]) \
and ('=' not in vars[d]) and (d not in vars[n]['depend']) \
and l_or(isintent_in, isintent_inout, isintent_inplace)(vars[n]):
vars[d]['depend'] = [n]
if ni > 1:
vars[d]['='] = '%s%s(%s,%s)%s' % (
pd, shape_macro, n, i, ad)
else:
vars[d]['='] = '%slen(%s)%s' % (pd, n, ad)
# /---< no check
if 1 and 'check' not in vars[d]:
if ni > 1:
vars[d]['check'] = ['%s%s(%s,%i)%s==%s'
% (pd, shape_macro, n, i, ad, d)]
else:
vars[d]['check'] = [
'%slen(%s)%s>=%s' % (pd, n, ad, d)]
if 'attrspec' not in vars[d]:
vars[d]['attrspec'] = ['optional']
if ('optional' not in vars[d]['attrspec']) and\
('required' not in vars[d]['attrspec']):
vars[d]['attrspec'].append('optional')
elif d not in ['*', ':']:
#/----< no check
if flag:
if d in vars:
if n not in ddeps:
vars[n]['depend'].append(d)
else:
vars[n]['depend'] = vars[n]['depend'] + ddeps
elif isstring(vars[n]):
length = '1'
if 'charselector' in vars[n]:
if '*' in vars[n]['charselector']:
length = _eval_length(vars[n]['charselector']['*'],
params)
vars[n]['charselector']['*'] = length
elif 'len' in vars[n]['charselector']:
length = _eval_length(vars[n]['charselector']['len'],
params)
del vars[n]['charselector']['len']
vars[n]['charselector']['*'] = length
if not vars[n]['check']:
del vars[n]['check']
if flag and not vars[n]['depend']:
del vars[n]['depend']
if '=' in vars[n]:
if 'attrspec' not in vars[n]:
vars[n]['attrspec'] = []
if ('optional' not in vars[n]['attrspec']) and \
('required' not in vars[n]['attrspec']):
vars[n]['attrspec'].append('optional')
if 'depend' not in vars[n]:
vars[n]['depend'] = []
for v, m in list(dep_matches.items()):
if m(vars[n]['=']):
vars[n]['depend'].append(v)
if not vars[n]['depend']:
del vars[n]['depend']
if isscalar(vars[n]):
vars[n]['='] = _eval_scalar(vars[n]['='], params)
for n in list(vars.keys()):
if n == block['name']: # n is block name
if 'note' in vars[n]:
block['note'] = vars[n]['note']
if block['block'] == 'function':
if 'result' in block and block['result'] in vars:
vars[n] = appenddecl(vars[n], vars[block['result']])
if 'prefix' in block:
pr = block['prefix']
ispure = 0
isrec = 1
pr1 = pr.replace('pure', '')
ispure = (not pr == pr1)
pr = pr1.replace('recursive', '')
isrec = (not pr == pr1)
m = typespattern[0].match(pr)
if m:
typespec, selector, attr, edecl = cracktypespec0(
m.group('this'), m.group('after'))
kindselect, charselect, typename = cracktypespec(
typespec, selector)
vars[n]['typespec'] = typespec
if kindselect:
if 'kind' in kindselect:
try:
kindselect['kind'] = eval(
kindselect['kind'], {}, params)
except:
pass
vars[n]['kindselector'] = kindselect
if charselect:
vars[n]['charselector'] = charselect
if typename:
vars[n]['typename'] = typename
if ispure:
vars[n] = setattrspec(vars[n], 'pure')
if isrec:
vars[n] = setattrspec(vars[n], 'recursive')
else:
outmess(
                    'analyzevars: prefix (%s) was not used\n' % repr(block['prefix']))
if not block['block'] in ['module', 'pythonmodule', 'python module', 'block data']:
if 'commonvars' in block:
neededvars = copy.copy(block['args'] + block['commonvars'])
else:
neededvars = copy.copy(block['args'])
for n in list(vars.keys()):
if l_or(isintent_callback, isintent_aux)(vars[n]):
neededvars.append(n)
if 'entry' in block:
neededvars.extend(list(block['entry'].keys()))
for k in list(block['entry'].keys()):
for n in block['entry'][k]:
if n not in neededvars:
neededvars.append(n)
if block['block'] == 'function':
if 'result' in block:
neededvars.append(block['result'])
else:
neededvars.append(block['name'])
if block['block'] in ['subroutine', 'function']:
name = block['name']
if name in vars and 'intent' in vars[name]:
block['intent'] = vars[name]['intent']
if block['block'] == 'type':
neededvars.extend(list(vars.keys()))
for n in list(vars.keys()):
if n not in neededvars:
del vars[n]
return vars
analyzeargs_re_1 = re.compile(r'\A[a-z]+[\w$]*\Z', re.I)
def expr2name(a, block, args=[]):
orig_a = a
a_is_expr = not analyzeargs_re_1.match(a)
if a_is_expr: # `a` is an expression
implicitrules, attrrules = buildimplicitrules(block)
at = determineexprtype(a, block['vars'], implicitrules)
na = 'e_'
for c in a:
c = c.lower()
if c not in string.ascii_lowercase + string.digits:
c = '_'
na = na + c
if na[-1] == '_':
na = na + 'e'
else:
na = na + '_e'
a = na
while a in block['vars'] or a in block['args']:
a = a + 'r'
if a in args:
k = 1
while a + str(k) in args:
k = k + 1
a = a + str(k)
if a_is_expr:
block['vars'][a] = at
else:
if a not in block['vars']:
if orig_a in block['vars']:
block['vars'][a] = block['vars'][orig_a]
else:
block['vars'][a] = {}
if 'externals' in block and orig_a in block['externals'] + block['interfaced']:
block['vars'][a] = setattrspec(block['vars'][a], 'external')
return a
def analyzeargs(block):
setmesstext(block)
implicitrules, attrrules = buildimplicitrules(block)
if 'args' not in block:
block['args'] = []
args = []
for a in block['args']:
a = expr2name(a, block, args)
args.append(a)
block['args'] = args
if 'entry' in block:
for k, args1 in list(block['entry'].items()):
for a in args1:
if a not in block['vars']:
block['vars'][a] = {}
for b in block['body']:
if b['name'] in args:
if 'externals' not in block:
block['externals'] = []
if b['name'] not in block['externals']:
block['externals'].append(b['name'])
if 'result' in block and block['result'] not in block['vars']:
block['vars'][block['result']] = {}
return block
determineexprtype_re_1 = re.compile(r'\A\(.+?[,].+?\)\Z', re.I)
determineexprtype_re_2 = re.compile(r'\A[+-]?\d+(_(?P<name>[\w]+)|)\Z', re.I)
determineexprtype_re_3 = re.compile(
    r'\A[+-]?[\d.]+[\d+-de.]*(_(?P<name>[\w]+)|)\Z', re.I)
determineexprtype_re_4 = re.compile(r'\A\(.*\)\Z', re.I)
determineexprtype_re_5 = re.compile(r'\A(?P<name>\w+)\s*\(.*?\)\s*\Z', re.I)
def _ensure_exprdict(r):
if isinstance(r, int):
return {'typespec': 'integer'}
if isinstance(r, float):
return {'typespec': 'real'}
if isinstance(r, complex):
return {'typespec': 'complex'}
if isinstance(r, dict):
return r
raise AssertionError(repr(r))
def determineexprtype(expr, vars, rules={}):
if expr in vars:
return _ensure_exprdict(vars[expr])
expr = expr.strip()
if determineexprtype_re_1.match(expr):
return {'typespec': 'complex'}
m = determineexprtype_re_2.match(expr)
if m:
if 'name' in m.groupdict() and m.group('name'):
outmess(
'determineexprtype: selected kind types not supported (%s)\n' % repr(expr))
return {'typespec': 'integer'}
m = determineexprtype_re_3.match(expr)
if m:
if 'name' in m.groupdict() and m.group('name'):
outmess(
'determineexprtype: selected kind types not supported (%s)\n' % repr(expr))
return {'typespec': 'real'}
for op in ['+', '-', '*', '/']:
for e in [x.strip() for x in markoutercomma(expr, comma=op).split('@' + op + '@')]:
if e in vars:
return _ensure_exprdict(vars[e])
t = {}
if determineexprtype_re_4.match(expr): # in parenthesis
t = determineexprtype(expr[1:-1], vars, rules)
else:
m = determineexprtype_re_5.match(expr)
if m:
rn = m.group('name')
t = determineexprtype(m.group('name'), vars, rules)
if t and 'attrspec' in t:
del t['attrspec']
if not t:
if rn[0] in rules:
return _ensure_exprdict(rules[rn[0]])
if expr[0] in '\'"':
return {'typespec': 'character', 'charselector': {'*': '*'}}
if not t:
outmess(
'determineexprtype: could not determine expressions (%s) type.\n' % (repr(expr)))
return t
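# A few illustrative calls (hypothetical inputs): determineexprtype('123', {})
# should give {'typespec': 'integer'}, determineexprtype('1.5d0', {}) should
# give {'typespec': 'real'}, and a quoted literal such as "'abc'" falls
# through to {'typespec': 'character', 'charselector': {'*': '*'}}.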
######
def crack2fortrangen(block, tab='\n', as_interface=False):
global skipfuncs, onlyfuncs
setmesstext(block)
ret = ''
if isinstance(block, list):
for g in block:
if g and g['block'] in ['function', 'subroutine']:
if g['name'] in skipfuncs:
continue
if onlyfuncs and g['name'] not in onlyfuncs:
continue
ret = ret + crack2fortrangen(g, tab, as_interface=as_interface)
return ret
prefix = ''
name = ''
args = ''
blocktype = block['block']
if blocktype == 'program':
return ''
argsl = []
if 'name' in block:
name = block['name']
if 'args' in block:
vars = block['vars']
for a in block['args']:
a = expr2name(a, block, argsl)
if not isintent_callback(vars[a]):
argsl.append(a)
if block['block'] == 'function' or argsl:
args = '(%s)' % ','.join(argsl)
f2pyenhancements = ''
if 'f2pyenhancements' in block:
for k in list(block['f2pyenhancements'].keys()):
f2pyenhancements = '%s%s%s %s' % (
f2pyenhancements, tab + tabchar, k, block['f2pyenhancements'][k])
intent_lst = block.get('intent', [])[:]
if blocktype == 'function' and 'callback' in intent_lst:
intent_lst.remove('callback')
if intent_lst:
f2pyenhancements = '%s%sintent(%s) %s' %\
(f2pyenhancements, tab + tabchar,
','.join(intent_lst), name)
use = ''
if 'use' in block:
use = use2fortran(block['use'], tab + tabchar)
common = ''
if 'common' in block:
common = common2fortran(block['common'], tab + tabchar)
if name == 'unknown_interface':
name = ''
result = ''
if 'result' in block:
result = ' result (%s)' % block['result']
if block['result'] not in argsl:
argsl.append(block['result'])
body = crack2fortrangen(block['body'], tab + tabchar)
vars = vars2fortran(
block, block['vars'], argsl, tab + tabchar, as_interface=as_interface)
mess = ''
if 'from' in block and not as_interface:
mess = '! in %s' % block['from']
if 'entry' in block:
entry_stmts = ''
for k, i in list(block['entry'].items()):
entry_stmts = '%s%sentry %s(%s)' \
% (entry_stmts, tab + tabchar, k, ','.join(i))
body = body + entry_stmts
if blocktype == 'block data' and name == '_BLOCK_DATA_':
name = ''
ret = '%s%s%s %s%s%s %s%s%s%s%s%s%send %s %s' % (
tab, prefix, blocktype, name, args, result, mess, f2pyenhancements, use, vars, common, body, tab, blocktype, name)
return ret
def common2fortran(common, tab=''):
ret = ''
for k in list(common.keys()):
if k == '_BLNK_':
ret = '%s%scommon %s' % (ret, tab, ','.join(common[k]))
else:
ret = '%s%scommon /%s/ %s' % (ret, tab, k, ','.join(common[k]))
return ret
def use2fortran(use, tab=''):
ret = ''
for m in list(use.keys()):
ret = '%s%suse %s,' % (ret, tab, m)
if use[m] == {}:
if ret and ret[-1] == ',':
ret = ret[:-1]
continue
if 'only' in use[m] and use[m]['only']:
ret = '%s only:' % (ret)
if 'map' in use[m] and use[m]['map']:
c = ' '
for k in list(use[m]['map'].keys()):
if k == use[m]['map'][k]:
ret = '%s%s%s' % (ret, c, k)
c = ','
else:
ret = '%s%s%s=>%s' % (ret, c, k, use[m]['map'][k])
c = ','
if ret and ret[-1] == ',':
ret = ret[:-1]
return ret
def true_intent_list(var):
lst = var['intent']
ret = []
for intent in lst:
try:
c = eval('isintent_%s(var)' % intent)
except NameError:
c = 0
if c:
ret.append(intent)
return ret
def vars2fortran(block, vars, args, tab='', as_interface=False):
"""
TODO:
public sub
...
"""
setmesstext(block)
ret = ''
nout = []
for a in args:
if a in block['vars']:
nout.append(a)
if 'commonvars' in block:
for a in block['commonvars']:
if a in vars:
if a not in nout:
nout.append(a)
else:
errmess(
'vars2fortran: Confused?!: "%s" is not defined in vars.\n' % a)
if 'varnames' in block:
nout.extend(block['varnames'])
if not as_interface:
for a in list(vars.keys()):
if a not in nout:
nout.append(a)
for a in nout:
if 'depend' in vars[a]:
for d in vars[a]['depend']:
if d in vars and 'depend' in vars[d] and a in vars[d]['depend']:
errmess(
'vars2fortran: Warning: cross-dependence between variables "%s" and "%s"\n' % (a, d))
if 'externals' in block and a in block['externals']:
if isintent_callback(vars[a]):
ret = '%s%sintent(callback) %s' % (ret, tab, a)
ret = '%s%sexternal %s' % (ret, tab, a)
if isoptional(vars[a]):
ret = '%s%soptional %s' % (ret, tab, a)
if a in vars and 'typespec' not in vars[a]:
continue
cont = 1
for b in block['body']:
if a == b['name'] and b['block'] == 'function':
cont = 0
break
if cont:
continue
if a not in vars:
show(vars)
outmess('vars2fortran: No definition for argument "%s".\n' % a)
continue
if a == block['name'] and not block['block'] == 'function':
continue
if 'typespec' not in vars[a]:
if 'attrspec' in vars[a] and 'external' in vars[a]['attrspec']:
if a in args:
ret = '%s%sexternal %s' % (ret, tab, a)
continue
show(vars[a])
outmess('vars2fortran: No typespec for argument "%s".\n' % a)
continue
vardef = vars[a]['typespec']
if vardef == 'type' and 'typename' in vars[a]:
vardef = '%s(%s)' % (vardef, vars[a]['typename'])
selector = {}
if 'kindselector' in vars[a]:
selector = vars[a]['kindselector']
elif 'charselector' in vars[a]:
selector = vars[a]['charselector']
if '*' in selector:
if selector['*'] in ['*', ':']:
vardef = '%s*(%s)' % (vardef, selector['*'])
else:
vardef = '%s*%s' % (vardef, selector['*'])
else:
if 'len' in selector:
vardef = '%s(len=%s' % (vardef, selector['len'])
if 'kind' in selector:
vardef = '%s,kind=%s)' % (vardef, selector['kind'])
else:
vardef = '%s)' % (vardef)
elif 'kind' in selector:
vardef = '%s(kind=%s)' % (vardef, selector['kind'])
c = ' '
if 'attrspec' in vars[a]:
attr = []
for l in vars[a]['attrspec']:
if l not in ['external']:
attr.append(l)
if attr:
vardef = '%s, %s' % (vardef, ','.join(attr))
c = ','
if 'dimension' in vars[a]:
vardef = '%s%sdimension(%s)' % (
vardef, c, ','.join(vars[a]['dimension']))
c = ','
if 'intent' in vars[a]:
lst = true_intent_list(vars[a])
if lst:
vardef = '%s%sintent(%s)' % (vardef, c, ','.join(lst))
c = ','
if 'check' in vars[a]:
vardef = '%s%scheck(%s)' % (vardef, c, ','.join(vars[a]['check']))
c = ','
if 'depend' in vars[a]:
vardef = '%s%sdepend(%s)' % (
vardef, c, ','.join(vars[a]['depend']))
c = ','
if '=' in vars[a]:
v = vars[a]['=']
if vars[a]['typespec'] in ['complex', 'double complex']:
try:
v = eval(v)
v = '(%s,%s)' % (v.real, v.imag)
except:
pass
vardef = '%s :: %s=%s' % (vardef, a, v)
else:
vardef = '%s :: %s' % (vardef, a)
ret = '%s%s%s' % (ret, tab, vardef)
return ret
######
def crackfortran(files):
global usermodules
outmess('Reading fortran codes...\n', 0)
readfortrancode(files, crackline)
outmess('Post-processing...\n', 0)
usermodules = []
postlist = postcrack(grouplist[0])
outmess('Post-processing (stage 2)...\n', 0)
postlist = postcrack2(postlist)
return usermodules + postlist
def crack2fortran(block):
global f2py_version
pyf = crack2fortrangen(block) + '\n'
header = """! -*- f90 -*-
! Note: the context of this file is case sensitive.
"""
footer = """
! This file was auto-generated with f2py (version:%s).
! See http://cens.ioc.ee/projects/f2py2e/
""" % (f2py_version)
return header + pyf + footer
if __name__ == "__main__":
files = []
funcs = []
f = 1
f2 = 0
f3 = 0
showblocklist = 0
for l in sys.argv[1:]:
if l == '':
pass
elif l[0] == ':':
f = 0
elif l == '-quiet':
quiet = 1
verbose = 0
elif l == '-verbose':
verbose = 2
quiet = 0
elif l == '-fix':
if strictf77:
outmess(
'Use option -f90 before -fix if Fortran 90 code is in fix form.\n', 0)
skipemptyends = 1
sourcecodeform = 'fix'
elif l == '-skipemptyends':
skipemptyends = 1
elif l == '--ignore-contains':
ignorecontains = 1
elif l == '-f77':
strictf77 = 1
sourcecodeform = 'fix'
elif l == '-f90':
strictf77 = 0
sourcecodeform = 'free'
skipemptyends = 1
elif l == '-h':
f2 = 1
elif l == '-show':
showblocklist = 1
elif l == '-m':
f3 = 1
elif l[0] == '-':
errmess('Unknown option %s\n' % repr(l))
elif f2:
f2 = 0
pyffilename = l
elif f3:
f3 = 0
f77modulename = l
elif f:
try:
open(l).close()
files.append(l)
except IOError as detail:
errmess('IOError: %s\n' % str(detail))
else:
funcs.append(l)
if not strictf77 and f77modulename and not skipemptyends:
outmess("""\
    Warning: You have specified a module name for non Fortran 77 code
    that should not need one (except if you are scanning F90 code
for non module blocks but then you should use flag -skipemptyends
and also be sure that the files do not contain programs without program statement).
""", 0)
    postlist = crackfortran(files)
if pyffilename:
outmess('Writing fortran code to file %s\n' % repr(pyffilename), 0)
pyf = crack2fortran(postlist)
f = open(pyffilename, 'w')
f.write(pyf)
f.close()
if showblocklist:
show(postlist)
| apache-2.0 |
tennc/webshell | php/create_code_with_xor.py | 1 | 2908 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# name: yihuo.py
# http://www.opensource.org/licenses/mit-license
# MIT License
# from: https://www.sqlsec.com/2020/07/shell.html#toc-heading-24
# Copyright (c) 2020
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import string
from urllib.parse import quote
keys = list(range(65)) + list(range(91, 97)) + list(range(123, 127))
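# 'keys' enumerates the ASCII codes outside A-Z/a-z (0-64, 91-96 and 123-126),
# so both XOR operands are guaranteed to be non-alphabetic; for example
# '!' (33) ^ '@' (64) == 'a' (97), which is the kind of pair listed below.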
results = []
for i in keys:
for j in keys:
        ascii_number = i ^ j
        if 65 <= ascii_number <= 90 or 97 <= ascii_number <= 122:
            if i < 32 and j < 32:
                temp = (
                    f'{chr(ascii_number)} = ascii:{i} ^ ascii{j} = {quote(chr(i))} ^ {quote(chr(j))}', chr(ascii_number))
                results.append(temp)
            elif i < 32 and j >= 32:
                temp = (
                    f'{chr(ascii_number)} = ascii:{i} ^ {chr(j)} = {quote(chr(i))} ^ {quote(chr(j))}', chr(ascii_number))
                results.append(temp)
            elif i >= 32 and j < 32:
                temp = (
                    f'{chr(ascii_number)} = {chr(i)} ^ ascii{j} = {quote(chr(i))} ^ {quote(chr(j))}', chr(ascii_number))
                results.append(temp)
            else:
                temp = (f'{chr(ascii_number)} = {chr(i)} ^ {chr(j)} = {quote(chr(i))} ^ {quote(chr(j))}', chr(ascii_number))
results.append(temp)
results.sort(key=lambda x: x[1], reverse=False)
for low_case in string.ascii_lowercase:
for result in results:
if low_case in result:
print(result[0])
for upper_case in string.ascii_uppercase:
for result in results:
if upper_case in result:
print(result[0])
| gpl-3.0 |
marcuskelly/recover | Lib/site-packages/pip/_vendor/requests/packages/chardet/langthaimodel.py | 2930 | 11275 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually do not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# The following result for thai was collected from a limited sample (1M).
# Character Mapping Table:
TIS620CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,182,106,107,100,183,184,185,101, 94,186,187,108,109,110,111, # 40
188,189,190, 89, 95,112,113,191,192,193,194,253,253,253,253,253, # 50
253, 64, 72, 73,114, 74,115,116,102, 81,201,117, 90,103, 78, 82, # 60
96,202, 91, 79, 84,104,105, 97, 98, 92,203,253,253,253,253,253, # 70
209,210,211,212,213, 88,214,215,216,217,218,219,220,118,221,222,
223,224, 99, 85, 83,225,226,227,228,229,230,231,232,233,234,235,
236, 5, 30,237, 24,238, 75, 8, 26, 52, 34, 51,119, 47, 58, 57,
49, 53, 55, 43, 20, 19, 44, 14, 48, 3, 17, 25, 39, 62, 31, 54,
45, 9, 16, 2, 61, 15,239, 12, 42, 46, 18, 21, 76, 4, 66, 63,
22, 10, 1, 36, 23, 13, 40, 27, 32, 35, 86,240,241,242,243,244,
11, 28, 41, 29, 33,245, 50, 37, 6, 7, 67, 77, 38, 93,246,247,
68, 56, 59, 65, 69, 60, 70, 80, 71, 87,248,249,250,251,252,253,
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 92.6386%
# first 1024 sequences:7.3177%
# rest sequences: 1.0230%
# negative sequences: 0.0436%
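# (Editorial note: each value in the matrix below categorises how likely one
# character is to follow another in Thai text; 3 marks frequent sequences and 0
# marks rare or unseen ones. This reading follows the usual chardet convention
# and is not spelled out in the original file.)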
ThaiLangModel = (
0,1,3,3,3,3,0,0,3,3,0,3,3,0,3,3,3,3,3,3,3,3,0,0,3,3,3,0,3,3,3,3,
0,3,3,0,0,0,1,3,0,3,3,2,3,3,0,1,2,3,3,3,3,0,2,0,2,0,0,3,2,1,2,2,
3,0,3,3,2,3,0,0,3,3,0,3,3,0,3,3,3,3,3,3,3,3,3,0,3,2,3,0,2,2,2,3,
0,2,3,0,0,0,0,1,0,1,2,3,1,1,3,2,2,0,1,1,0,0,1,0,0,0,0,0,0,0,1,1,
3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,2,2,2,3,3,2,3,2,3,3,2,2,2,
3,1,2,3,0,3,3,2,2,1,2,3,3,1,2,0,1,3,0,1,0,0,1,0,0,0,0,0,0,0,1,1,
3,3,2,2,3,3,3,3,1,2,3,3,3,3,3,2,2,2,2,3,3,2,2,3,3,2,2,3,2,3,2,2,
3,3,1,2,3,1,2,2,3,3,1,0,2,1,0,0,3,1,2,1,0,0,1,0,0,0,0,0,0,1,0,1,
3,3,3,3,3,3,2,2,3,3,3,3,2,3,2,2,3,3,2,2,3,2,2,2,2,1,1,3,1,2,1,1,
3,2,1,0,2,1,0,1,0,1,1,0,1,1,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,
3,3,3,2,3,2,3,3,2,2,3,2,3,3,2,3,1,1,2,3,2,2,2,3,2,2,2,2,2,1,2,1,
2,2,1,1,3,3,2,1,0,1,2,2,0,1,3,0,0,0,1,1,0,0,0,0,0,2,3,0,0,2,1,1,
3,3,2,3,3,2,0,0,3,3,0,3,3,0,2,2,3,1,2,2,1,1,1,0,2,2,2,0,2,2,1,1,
0,2,1,0,2,0,0,2,0,1,0,0,1,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,2,3,3,2,0,0,3,3,0,2,3,0,2,1,2,2,2,2,1,2,0,0,2,2,2,0,2,2,1,1,
0,2,1,0,2,0,0,2,0,1,1,0,1,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,
3,3,2,3,2,3,2,0,2,2,1,3,2,1,3,2,1,2,3,2,2,3,0,2,3,2,2,1,2,2,2,2,
1,2,2,0,0,0,0,2,0,1,2,0,1,1,1,0,1,0,3,1,1,0,0,0,0,0,0,0,0,0,1,0,
3,3,2,3,3,2,3,2,2,2,3,2,2,3,2,2,1,2,3,2,2,3,1,3,2,2,2,3,2,2,2,3,
3,2,1,3,0,1,1,1,0,2,1,1,1,1,1,0,1,0,1,1,0,0,0,0,0,0,0,0,0,2,0,0,
1,0,0,3,0,3,3,3,3,3,0,0,3,0,2,2,3,3,3,3,3,0,0,0,1,1,3,0,0,0,0,2,
0,0,1,0,0,0,0,0,0,0,2,3,0,0,0,3,0,2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
2,0,3,3,3,3,0,0,2,3,0,0,3,0,3,3,2,3,3,3,3,3,0,0,3,3,3,0,0,0,3,3,
0,0,3,0,0,0,0,2,0,0,2,1,1,3,0,0,1,0,0,2,3,0,1,0,0,0,0,0,0,0,1,0,
3,3,3,3,2,3,3,3,3,3,3,3,1,2,1,3,3,2,2,1,2,2,2,3,1,1,2,0,2,1,2,1,
2,2,1,0,0,0,1,1,0,1,0,1,1,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,
3,0,2,1,2,3,3,3,0,2,0,2,2,0,2,1,3,2,2,1,2,1,0,0,2,2,1,0,2,1,2,2,
0,1,1,0,0,0,0,1,0,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,1,3,3,1,1,3,0,2,3,1,1,3,2,1,1,2,0,2,2,3,2,1,1,1,1,1,2,
3,0,0,1,3,1,2,1,2,0,3,0,0,0,1,0,3,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,
3,3,1,1,3,2,3,3,3,1,3,2,1,3,2,1,3,2,2,2,2,1,3,3,1,2,1,3,1,2,3,0,
2,1,1,3,2,2,2,1,2,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,
3,3,2,3,2,3,3,2,3,2,3,2,3,3,2,1,0,3,2,2,2,1,2,2,2,1,2,2,1,2,1,1,
2,2,2,3,0,1,3,1,1,1,1,0,1,1,0,2,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,3,2,2,1,1,3,2,3,2,3,2,0,3,2,2,1,2,0,2,2,2,1,2,2,2,2,1,
3,2,1,2,2,1,0,2,0,1,0,0,1,1,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,2,3,1,2,3,3,2,2,3,0,1,1,2,0,3,3,2,2,3,0,1,1,3,0,0,0,0,
3,1,0,3,3,0,2,0,2,1,0,0,3,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,2,3,2,3,3,0,1,3,1,1,2,1,2,1,1,3,1,1,0,2,3,1,1,1,1,1,1,1,1,
3,1,1,2,2,2,2,1,1,1,0,0,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,2,2,1,1,2,1,3,3,2,3,2,2,3,2,2,3,1,2,2,1,2,0,3,2,1,2,2,2,2,2,1,
3,2,1,2,2,2,1,1,1,1,0,0,1,1,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,1,3,3,0,2,1,0,3,2,0,0,3,1,0,1,1,0,1,0,0,0,0,0,1,
1,0,0,1,0,3,2,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,2,2,2,3,0,0,1,3,0,3,2,0,3,2,2,3,3,3,3,3,1,0,2,2,2,0,2,2,1,2,
0,2,3,0,0,0,0,1,0,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,0,2,3,1,3,3,2,3,3,0,3,3,0,3,2,2,3,2,3,3,3,0,0,2,2,3,0,1,1,1,3,
0,0,3,0,0,0,2,2,0,1,3,0,1,2,2,2,3,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,
3,2,3,3,2,0,3,3,2,2,3,1,3,2,1,3,2,0,1,2,2,0,2,3,2,1,0,3,0,0,0,0,
3,0,0,2,3,1,3,0,0,3,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,3,2,2,2,1,2,0,1,3,1,1,3,1,3,0,0,2,1,1,1,1,2,1,1,1,0,2,1,0,1,
1,2,0,0,0,3,1,1,0,0,0,0,1,0,1,0,0,1,0,1,0,0,0,0,0,3,1,0,0,0,1,0,
3,3,3,3,2,2,2,2,2,1,3,1,1,1,2,0,1,1,2,1,2,1,3,2,0,0,3,1,1,1,1,1,
3,1,0,2,3,0,0,0,3,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,2,3,0,3,3,0,2,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,2,3,1,3,0,0,1,2,0,0,2,0,3,3,2,3,3,3,2,3,0,0,2,2,2,0,0,0,2,2,
0,0,1,0,0,0,0,3,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
0,0,0,3,0,2,0,0,0,0,0,0,0,0,0,0,1,2,3,1,3,3,0,0,1,0,3,0,0,0,0,0,
0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,1,2,3,1,2,3,1,0,3,0,2,2,1,0,2,1,1,2,0,1,0,0,1,1,1,1,0,1,0,0,
1,0,0,0,0,1,1,0,3,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,1,0,1,1,1,3,1,2,2,2,2,2,2,1,1,1,1,0,3,1,0,1,3,1,1,1,1,
1,1,0,2,0,1,3,1,1,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,1,
3,0,2,2,1,3,3,2,3,3,0,1,1,0,2,2,1,2,1,3,3,1,0,0,3,2,0,0,0,0,2,1,
0,1,0,0,0,0,1,2,0,1,1,3,1,1,2,2,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,0,3,0,0,1,0,0,0,3,0,0,3,0,3,1,0,1,1,1,3,2,0,0,0,3,0,0,0,0,2,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,1,3,2,1,3,3,1,2,2,0,1,2,1,0,1,2,0,0,0,0,0,3,0,0,0,3,0,0,0,0,
3,0,0,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,1,2,0,3,3,3,2,2,0,1,1,0,1,3,0,0,0,2,2,0,0,0,0,3,1,0,1,0,0,0,
0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,2,3,1,2,0,0,2,1,0,3,1,0,1,2,0,1,1,1,1,3,0,0,3,1,1,0,2,2,1,1,
0,2,0,0,0,0,0,1,0,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,3,1,2,0,0,2,2,0,1,2,0,1,0,1,3,1,2,1,0,0,0,2,0,3,0,0,0,1,0,
0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,1,1,2,2,0,0,0,2,0,2,1,0,1,1,0,1,1,1,2,1,0,0,1,1,1,0,2,1,1,1,
0,1,1,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,1,
0,0,0,2,0,1,3,1,1,1,1,0,0,0,0,3,2,0,1,0,0,0,1,2,0,0,0,1,0,0,0,0,
0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,3,3,3,3,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,2,3,2,2,0,0,0,1,0,0,0,0,2,3,2,1,2,2,3,0,0,0,2,3,1,0,0,0,1,1,
0,0,1,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,0,
3,3,2,2,0,1,0,0,0,0,2,0,2,0,1,0,0,0,1,1,0,0,0,2,1,0,1,0,1,1,0,0,
0,1,0,2,0,0,1,0,3,0,1,0,0,0,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,1,0,0,1,0,0,0,0,0,1,1,2,0,0,0,0,1,0,0,1,3,1,0,0,0,0,1,1,0,0,
0,1,0,0,0,0,3,0,0,0,0,0,0,3,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,
3,3,1,1,1,1,2,3,0,0,2,1,1,1,1,1,0,2,1,1,0,0,0,2,1,0,1,2,1,1,0,1,
2,1,0,3,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,3,1,0,0,0,0,0,0,0,3,0,0,0,3,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,
0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,2,0,0,0,0,0,0,1,2,1,0,1,1,0,2,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,2,0,0,0,1,3,0,1,0,0,0,2,0,0,0,0,0,0,0,1,2,0,0,0,0,0,
3,3,0,0,1,1,2,0,0,1,2,1,0,1,1,1,0,1,1,0,0,2,1,1,0,1,0,0,1,1,1,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,1,0,0,0,0,1,0,0,0,0,3,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,0,0,1,1,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,0,1,2,0,1,2,0,0,1,1,0,2,0,1,0,0,1,0,0,0,0,1,0,0,0,2,0,0,0,0,
1,0,0,1,0,1,1,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,1,0,0,0,0,0,0,0,1,1,0,1,1,0,2,1,3,0,0,0,0,1,1,0,0,0,0,0,0,0,3,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,1,0,1,0,0,2,0,0,2,0,0,1,1,2,0,0,1,1,0,0,0,1,0,0,0,1,1,0,0,0,
1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
1,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,1,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,3,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,
1,0,0,0,0,0,0,0,0,1,0,0,0,0,2,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,1,0,0,2,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
)
TIS620ThaiModel = {
'charToOrderMap': TIS620CharToOrderMap,
'precedenceMatrix': ThaiLangModel,
'mTypicalPositiveRatio': 0.926386,
'keepEnglishLetter': False,
'charsetName': "TIS-620"
}
# flake8: noqa
| bsd-2-clause |
appsembler/edx-platform | lms/djangoapps/django_comment_client/base/views.py | 9 | 27767 | import functools
import json
import logging
import random
import time
import urlparse
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.core import exceptions
from django.http import Http404, HttpResponse, HttpResponseServerError
from django.utils.translation import ugettext as _
from django.views.decorators import csrf
from django.views.decorators.http import require_GET, require_POST
from opaque_keys.edx.keys import CourseKey
from six import text_type
import django_comment_client.settings as cc_settings
import lms.lib.comment_client as cc
from courseware.access import has_access
from courseware.courses import get_course_by_id, get_course_overview_with_access, get_course_with_access
from django_comment_client.permissions import check_permissions_by_view, get_team, has_permission
from django_comment_client.utils import (
JsonError,
JsonResponse,
add_courseware_context,
discussion_category_id_access,
get_ability,
get_annotated_content_info,
get_cached_discussion_id_map,
get_group_id_for_comments_service,
get_user_group_ids,
is_comment_too_deep,
prepare_content
)
from django_comment_common.signals import (
comment_created,
comment_deleted,
comment_edited,
comment_endorsed,
comment_voted,
thread_created,
thread_deleted,
thread_edited,
thread_voted,
thread_followed,
thread_unfollowed,
)
from django_comment_common.utils import ThreadContext
import eventtracking
from lms.djangoapps.courseware.exceptions import CourseAccessRedirect
from util.file import store_uploaded_file
log = logging.getLogger(__name__)
TRACKING_MAX_FORUM_BODY = 2000
TRACKING_MAX_FORUM_TITLE = 1000
_EVENT_NAME_TEMPLATE = 'edx.forum.{obj_type}.{action_name}'
def track_forum_event(request, event_name, course, obj, data, id_map=None):
"""
Send out an analytics event when a forum event happens. Works for threads,
responses to threads, and comments on those responses.
"""
user = request.user
data['id'] = obj.id
commentable_id = data['commentable_id']
team = get_team(commentable_id)
if team is not None:
data.update(team_id=team.team_id)
if id_map is None:
id_map = get_cached_discussion_id_map(course, [commentable_id], user)
if commentable_id in id_map:
data['category_name'] = id_map[commentable_id]["title"]
data['category_id'] = commentable_id
data['url'] = request.META.get('HTTP_REFERER', '')
data['user_forums_roles'] = [
role.name for role in user.roles.filter(course_id=course.id)
]
data['user_course_roles'] = [
role.role for role in user.courseaccessrole_set.filter(course_id=course.id)
]
eventtracking.tracker.emit(event_name, data)
def track_created_event(request, event_name, course, obj, data):
"""
Send analytics event for a newly created thread, response or comment.
"""
if len(obj.body) > TRACKING_MAX_FORUM_BODY:
data['truncated'] = True
else:
data['truncated'] = False
data['body'] = obj.body[:TRACKING_MAX_FORUM_BODY]
track_forum_event(request, event_name, course, obj, data)
def add_truncated_title_to_event_data(event_data, full_title): # pylint: disable=invalid-name
event_data['title_truncated'] = (len(full_title) > TRACKING_MAX_FORUM_TITLE)
event_data['title'] = full_title[:TRACKING_MAX_FORUM_TITLE]
def track_thread_created_event(request, course, thread, followed):
"""
Send analytics event for a newly created thread.
"""
event_name = _EVENT_NAME_TEMPLATE.format(obj_type='thread', action_name='created')
event_data = {
'commentable_id': thread.commentable_id,
'group_id': thread.get("group_id"),
'thread_type': thread.thread_type,
'anonymous': thread.anonymous,
'anonymous_to_peers': thread.anonymous_to_peers,
'options': {'followed': followed},
# There is a stated desire for an 'origin' property that will state
# whether this thread was created via courseware or the forum.
# However, the view does not contain that data, and including it will
# likely require changes elsewhere.
}
add_truncated_title_to_event_data(event_data, thread.title)
track_created_event(request, event_name, course, thread, event_data)
def track_comment_created_event(request, course, comment, commentable_id, followed):
"""
Send analytics event for a newly created response or comment.
"""
obj_type = 'comment' if comment.get("parent_id") else 'response'
event_name = _EVENT_NAME_TEMPLATE.format(obj_type=obj_type, action_name='created')
event_data = {
'discussion': {'id': comment.thread_id},
'commentable_id': commentable_id,
'options': {'followed': followed},
}
parent_id = comment.get('parent_id')
if parent_id:
event_data['response'] = {'id': parent_id}
track_created_event(request, event_name, course, comment, event_data)
def track_voted_event(request, course, obj, vote_value, undo_vote=False):
"""
Send analytics event for a vote on a thread or response.
"""
if isinstance(obj, cc.Thread):
obj_type = 'thread'
else:
obj_type = 'response'
event_name = _EVENT_NAME_TEMPLATE.format(obj_type=obj_type, action_name='voted')
event_data = {
'commentable_id': obj.commentable_id,
'target_username': obj.get('username'),
'undo_vote': undo_vote,
'vote_value': vote_value,
}
track_forum_event(request, event_name, course, obj, event_data)
def track_thread_viewed_event(request, course, thread):
"""
Send analytics event for a viewed thread.
"""
event_name = _EVENT_NAME_TEMPLATE.format(obj_type='thread', action_name='viewed')
event_data = {}
event_data['commentable_id'] = thread.commentable_id
if hasattr(thread, 'username'):
event_data['target_username'] = thread.username
add_truncated_title_to_event_data(event_data, thread.title)
track_forum_event(request, event_name, course, thread, event_data)
def permitted(func):
"""
View decorator to verify the user is authorized to access this endpoint.
"""
@functools.wraps(func)
def wrapper(request, *args, **kwargs):
"""
Wrapper for the view that only calls the view if the user is authorized.
"""
def fetch_content():
"""
Extract the forum object from the keyword arguments to the view.
"""
user_group_id = None
content_user_group_id = None
if "thread_id" in kwargs:
content = cc.Thread.find(kwargs["thread_id"]).to_dict()
elif "comment_id" in kwargs:
content = cc.Comment.find(kwargs["comment_id"]).to_dict()
elif "commentable_id" in kwargs:
content = cc.Commentable.find(kwargs["commentable_id"]).to_dict()
else:
content = None
if 'username' in content:
(user_group_id, content_user_group_id) = get_user_group_ids(course_key, content, request.user)
return content, user_group_id, content_user_group_id
course_key = CourseKey.from_string(kwargs['course_id'])
content, user_group_id, content_user_group_id = fetch_content()
if check_permissions_by_view(request.user, course_key, content,
request.view_name, user_group_id, content_user_group_id):
return func(request, *args, **kwargs)
else:
return JsonError("unauthorized", status=401)
return wrapper
def ajax_content_response(request, course_key, content):
"""
Standard AJAX response returning the content hierarchy of the current thread.
"""
user_info = cc.User.from_django_user(request.user).to_dict()
annotated_content_info = get_annotated_content_info(course_key, content, request.user, user_info)
return JsonResponse({
'content': prepare_content(content, course_key),
'annotated_content_info': annotated_content_info,
})
@require_POST
@login_required
@permitted
def create_thread(request, course_id, commentable_id):
"""
Given a course and commentable ID, create the thread
"""
log.debug("Creating new thread in %r, id %r", course_id, commentable_id)
course_key = CourseKey.from_string(course_id)
course = get_course_with_access(request.user, 'load', course_key)
post = request.POST
user = request.user
if course.allow_anonymous:
anonymous = post.get('anonymous', 'false').lower() == 'true'
else:
anonymous = False
if course.allow_anonymous_to_peers:
anonymous_to_peers = post.get('anonymous_to_peers', 'false').lower() == 'true'
else:
anonymous_to_peers = False
if 'title' not in post or not post['title'].strip():
return JsonError(_("Title can't be empty"))
if 'body' not in post or not post['body'].strip():
return JsonError(_("Body can't be empty"))
params = {
'anonymous': anonymous,
'anonymous_to_peers': anonymous_to_peers,
'commentable_id': commentable_id,
'course_id': text_type(course_key),
'user_id': user.id,
'thread_type': post["thread_type"],
'body': post["body"],
'title': post["title"],
}
    # Check whether this commentable belongs to a team, and add the right context
if get_team(commentable_id) is not None:
params['context'] = ThreadContext.STANDALONE
else:
params['context'] = ThreadContext.COURSE
thread = cc.Thread(**params)
# Divide the thread if required
try:
group_id = get_group_id_for_comments_service(request, course_key, commentable_id)
except ValueError:
return HttpResponseServerError("Invalid group id for commentable")
if group_id is not None:
thread.group_id = group_id
thread.save()
thread_created.send(sender=None, user=user, post=thread)
# patch for backward compatibility to comments service
if 'pinned' not in thread.attributes:
thread['pinned'] = False
follow = post.get('auto_subscribe', 'false').lower() == 'true'
if follow:
cc_user = cc.User.from_django_user(user)
cc_user.follow(thread)
thread_followed.send(sender=None, user=user, post=thread)
data = thread.to_dict()
add_courseware_context([data], course, user)
track_thread_created_event(request, course, thread, follow)
if request.is_ajax():
return ajax_content_response(request, course_key, data)
else:
return JsonResponse(prepare_content(data, course_key))
@require_POST
@login_required
@permitted
def update_thread(request, course_id, thread_id):
"""
    Given a course id and thread id, update an existing thread, used for both static and ajax submissions
"""
if 'title' not in request.POST or not request.POST['title'].strip():
return JsonError(_("Title can't be empty"))
if 'body' not in request.POST or not request.POST['body'].strip():
return JsonError(_("Body can't be empty"))
course_key = CourseKey.from_string(course_id)
thread = cc.Thread.find(thread_id)
    # Get the thread context first so it is not lost when the thread object's values are reset below
thread_context = getattr(thread, "context", "course")
thread.body = request.POST["body"]
thread.title = request.POST["title"]
user = request.user
# The following checks should avoid issues we've seen during deploys, where end users are hitting an updated server
# while their browser still has the old client code. This will avoid erasing present values in those cases.
if "thread_type" in request.POST:
thread.thread_type = request.POST["thread_type"]
if "commentable_id" in request.POST:
commentable_id = request.POST["commentable_id"]
course = get_course_with_access(user, 'load', course_key)
if thread_context == "course" and not discussion_category_id_access(course, user, commentable_id):
return JsonError(_("Topic doesn't exist"))
else:
thread.commentable_id = commentable_id
thread.save()
thread_edited.send(sender=None, user=user, post=thread)
if request.is_ajax():
return ajax_content_response(request, course_key, thread.to_dict())
else:
return JsonResponse(prepare_content(thread.to_dict(), course_key))
def _create_comment(request, course_key, thread_id=None, parent_id=None):
"""
given a course_key, thread_id, and parent_id, create a comment,
called from create_comment to do the actual creation
"""
assert isinstance(course_key, CourseKey)
post = request.POST
user = request.user
if 'body' not in post or not post['body'].strip():
return JsonError(_("Body can't be empty"))
course = get_course_with_access(user, 'load', course_key)
if course.allow_anonymous:
anonymous = post.get('anonymous', 'false').lower() == 'true'
else:
anonymous = False
if course.allow_anonymous_to_peers:
anonymous_to_peers = post.get('anonymous_to_peers', 'false').lower() == 'true'
else:
anonymous_to_peers = False
comment = cc.Comment(
anonymous=anonymous,
anonymous_to_peers=anonymous_to_peers,
user_id=user.id,
course_id=text_type(course_key),
thread_id=thread_id,
parent_id=parent_id,
body=post["body"]
)
comment.save()
comment_created.send(sender=None, user=user, post=comment)
followed = post.get('auto_subscribe', 'false').lower() == 'true'
if followed:
cc_user = cc.User.from_django_user(request.user)
cc_user.follow(comment.thread)
track_comment_created_event(request, course, comment, comment.thread.commentable_id, followed)
if request.is_ajax():
return ajax_content_response(request, course_key, comment.to_dict())
else:
return JsonResponse(prepare_content(comment.to_dict(), course.id))
@require_POST
@login_required
@permitted
def create_comment(request, course_id, thread_id):
"""
given a course_id and thread_id, test for comment depth. if not too deep,
call _create_comment to create the actual comment.
"""
if is_comment_too_deep(parent=None):
return JsonError(_("Comment level too deep"))
return _create_comment(request, CourseKey.from_string(course_id), thread_id=thread_id)
@require_POST
@login_required
@permitted
def delete_thread(request, course_id, thread_id):
"""
given a course_id and thread_id, delete this thread
this is ajax only
"""
course_key = CourseKey.from_string(course_id)
thread = cc.Thread.find(thread_id)
thread.delete()
thread_deleted.send(sender=None, user=request.user, post=thread)
return JsonResponse(prepare_content(thread.to_dict(), course_key))
@require_POST
@login_required
@permitted
def update_comment(request, course_id, comment_id):
"""
given a course_id and comment_id, update the comment with payload attributes
handles static and ajax submissions
"""
course_key = CourseKey.from_string(course_id)
comment = cc.Comment.find(comment_id)
if 'body' not in request.POST or not request.POST['body'].strip():
return JsonError(_("Body can't be empty"))
comment.body = request.POST["body"]
comment.save()
comment_edited.send(sender=None, user=request.user, post=comment)
if request.is_ajax():
return ajax_content_response(request, course_key, comment.to_dict())
else:
return JsonResponse(prepare_content(comment.to_dict(), course_key))
@require_POST
@login_required
@permitted
def endorse_comment(request, course_id, comment_id):
"""
given a course_id and comment_id, toggle the endorsement of this comment,
ajax only
"""
course_key = CourseKey.from_string(course_id)
comment = cc.Comment.find(comment_id)
user = request.user
comment.endorsed = request.POST.get('endorsed', 'false').lower() == 'true'
comment.endorsement_user_id = user.id
comment.save()
comment_endorsed.send(sender=None, user=user, post=comment)
return JsonResponse(prepare_content(comment.to_dict(), course_key))
@require_POST
@login_required
@permitted
def openclose_thread(request, course_id, thread_id):
"""
given a course_id and thread_id, toggle the status of this thread
ajax only
"""
course_key = CourseKey.from_string(course_id)
thread = cc.Thread.find(thread_id)
thread.closed = request.POST.get('closed', 'false').lower() == 'true'
thread.save()
return JsonResponse({
'content': prepare_content(thread.to_dict(), course_key),
'ability': get_ability(course_key, thread.to_dict(), request.user),
})
@require_POST
@login_required
@permitted
def create_sub_comment(request, course_id, comment_id):
"""
given a course_id and comment_id, create a response to a comment
after checking the max depth allowed, if allowed
"""
if is_comment_too_deep(parent=cc.Comment(comment_id)):
return JsonError(_("Comment level too deep"))
return _create_comment(request, CourseKey.from_string(course_id), parent_id=comment_id)
@require_POST
@login_required
@permitted
def delete_comment(request, course_id, comment_id):
"""
given a course_id and comment_id delete this comment
ajax only
"""
course_key = CourseKey.from_string(course_id)
comment = cc.Comment.find(comment_id)
comment.delete()
comment_deleted.send(sender=None, user=request.user, post=comment)
return JsonResponse(prepare_content(comment.to_dict(), course_key))
def _vote_or_unvote(request, course_id, obj, value='up', undo_vote=False):
"""
Vote or unvote for a thread or a response.
"""
course_key = CourseKey.from_string(course_id)
course = get_course_with_access(request.user, 'load', course_key)
user = cc.User.from_django_user(request.user)
if undo_vote:
user.unvote(obj)
# TODO(smarnach): Determine the value of the vote that is undone. Currently, you can
# only cast upvotes in the user interface, so it is assumed that the vote value is 'up'.
# (People could theoretically downvote by handcrafting AJAX requests.)
else:
user.vote(obj, value)
thread_voted.send(sender=None, user=request.user, post=obj)
track_voted_event(request, course, obj, value, undo_vote)
return JsonResponse(prepare_content(obj.to_dict(), course_key))
@require_POST
@login_required
@permitted
def vote_for_comment(request, course_id, comment_id, value):
"""
Given a course_id and comment_id, vote for this response. AJAX only.
"""
comment = cc.Comment.find(comment_id)
result = _vote_or_unvote(request, course_id, comment, value)
comment_voted.send(sender=None, user=request.user, post=comment)
return result
@require_POST
@login_required
@permitted
def undo_vote_for_comment(request, course_id, comment_id):
"""
given a course id and comment id, remove vote
ajax only
"""
return _vote_or_unvote(request, course_id, cc.Comment.find(comment_id), undo_vote=True)
@require_POST
@login_required
@permitted
def vote_for_thread(request, course_id, thread_id, value):
"""
given a course id and thread id vote for this thread
ajax only
"""
thread = cc.Thread.find(thread_id)
result = _vote_or_unvote(request, course_id, thread, value)
return result
@require_POST
@login_required
@permitted
def undo_vote_for_thread(request, course_id, thread_id):
"""
    given a course id and thread id, remove the user's vote for the thread
ajax only
"""
return _vote_or_unvote(request, course_id, cc.Thread.find(thread_id), undo_vote=True)
@require_POST
@login_required
@permitted
def flag_abuse_for_thread(request, course_id, thread_id):
"""
given a course_id and thread_id flag this thread for abuse
ajax only
"""
course_key = CourseKey.from_string(course_id)
user = cc.User.from_django_user(request.user)
thread = cc.Thread.find(thread_id)
thread.flagAbuse(user, thread)
return JsonResponse(prepare_content(thread.to_dict(), course_key))
@require_POST
@login_required
@permitted
def un_flag_abuse_for_thread(request, course_id, thread_id):
"""
given a course id and thread id, remove abuse flag for this thread
ajax only
"""
user = cc.User.from_django_user(request.user)
course_key = CourseKey.from_string(course_id)
course = get_course_by_id(course_key)
thread = cc.Thread.find(thread_id)
remove_all = bool(
has_permission(request.user, 'openclose_thread', course_key) or
has_access(request.user, 'staff', course)
)
thread.unFlagAbuse(user, thread, remove_all)
return JsonResponse(prepare_content(thread.to_dict(), course_key))
@require_POST
@login_required
@permitted
def flag_abuse_for_comment(request, course_id, comment_id):
"""
given a course and comment id, flag comment for abuse
ajax only
"""
course_key = CourseKey.from_string(course_id)
user = cc.User.from_django_user(request.user)
comment = cc.Comment.find(comment_id)
comment.flagAbuse(user, comment)
return JsonResponse(prepare_content(comment.to_dict(), course_key))
@require_POST
@login_required
@permitted
def un_flag_abuse_for_comment(request, course_id, comment_id):
"""
given a course_id and comment id, unflag comment for abuse
ajax only
"""
user = cc.User.from_django_user(request.user)
course_key = CourseKey.from_string(course_id)
course = get_course_by_id(course_key)
remove_all = bool(
has_permission(request.user, 'openclose_thread', course_key) or
has_access(request.user, 'staff', course)
)
comment = cc.Comment.find(comment_id)
comment.unFlagAbuse(user, comment, remove_all)
return JsonResponse(prepare_content(comment.to_dict(), course_key))
@require_POST
@login_required
@permitted
def pin_thread(request, course_id, thread_id):
"""
given a course id and thread id, pin this thread
ajax only
"""
course_key = CourseKey.from_string(course_id)
user = cc.User.from_django_user(request.user)
thread = cc.Thread.find(thread_id)
thread.pin(user, thread_id)
return JsonResponse(prepare_content(thread.to_dict(), course_key))
@require_POST
@login_required
@permitted
def un_pin_thread(request, course_id, thread_id):
"""
given a course id and thread id, remove pin from this thread
ajax only
"""
course_key = CourseKey.from_string(course_id)
user = cc.User.from_django_user(request.user)
thread = cc.Thread.find(thread_id)
thread.un_pin(user, thread_id)
return JsonResponse(prepare_content(thread.to_dict(), course_key))
@require_POST
@login_required
@permitted
def follow_thread(request, course_id, thread_id):
user = cc.User.from_django_user(request.user)
thread = cc.Thread.find(thread_id)
user.follow(thread)
thread_followed.send(sender=None, user=request.user, post=thread)
return JsonResponse({})
@require_POST
@login_required
@permitted
def follow_commentable(request, course_id, commentable_id):
"""
given a course_id and commentable id, follow this commentable
ajax only
"""
user = cc.User.from_django_user(request.user)
commentable = cc.Commentable.find(commentable_id)
user.follow(commentable)
return JsonResponse({})
@require_POST
@login_required
@permitted
def unfollow_thread(request, course_id, thread_id):
"""
given a course id and thread id, stop following this thread
ajax only
"""
user = cc.User.from_django_user(request.user)
thread = cc.Thread.find(thread_id)
user.unfollow(thread)
thread_unfollowed.send(sender=None, user=request.user, post=thread)
return JsonResponse({})
@require_POST
@login_required
@permitted
def unfollow_commentable(request, course_id, commentable_id):
"""
given a course id and commentable id stop following commentable
ajax only
"""
user = cc.User.from_django_user(request.user)
commentable = cc.Commentable.find(commentable_id)
user.unfollow(commentable)
return JsonResponse({})
@require_POST
@login_required
@csrf.csrf_exempt
def upload(request, course_id): # ajax upload file to a question or answer
"""view that handles file upload via Ajax
"""
# check upload permission
error = ''
new_file_name = ''
try:
# TODO authorization
#may raise exceptions.PermissionDenied
#if request.user.is_anonymous:
# msg = _('Sorry, anonymous users cannot upload files')
# raise exceptions.PermissionDenied(msg)
#request.user.assert_can_upload_file()
base_file_name = str(time.time()).replace('.', str(random.randint(0, 100000)))
file_storage, new_file_name = store_uploaded_file(
request, 'file-upload', cc_settings.ALLOWED_UPLOAD_FILE_TYPES, base_file_name,
max_file_size=cc_settings.MAX_UPLOAD_FILE_SIZE
)
except exceptions.PermissionDenied, err:
error = unicode(err)
except Exception, err:
print err
logging.critical(unicode(err))
error = _('Error uploading file. Please contact the site administrator. Thank you.')
if error == '':
result = _('Good')
file_url = file_storage.url(new_file_name)
parsed_url = urlparse.urlparse(file_url)
file_url = urlparse.urlunparse(
urlparse.ParseResult(
parsed_url.scheme,
parsed_url.netloc,
parsed_url.path,
'', '', ''
)
)
else:
result = ''
file_url = ''
# Using content-type of text/plain here instead of JSON because
# IE doesn't know how to handle the JSON response and prompts the
# user to save the JSON as a file instead of passing it to the callback.
return HttpResponse(json.dumps({
'result': {
'msg': result,
'error': error,
'file_url': file_url,
}
}), content_type="text/plain")
@require_GET
@login_required
def users(request, course_id):
"""
Given a `username` query parameter, find matches for users in the forum for this course.
Only exact matches are supported here, so the length of the result set will either be 0 or 1.
"""
course_key = CourseKey.from_string(course_id)
try:
get_course_overview_with_access(request.user, 'load', course_key, check_if_enrolled=True)
except Http404:
# course didn't exist, or requesting user does not have access to it.
return JsonError(status=404)
except CourseAccessRedirect:
# user does not have access to the course.
return JsonError(status=404)
try:
username = request.GET['username']
except KeyError:
# 400 is default status for JsonError
return JsonError(["username parameter is required"])
user_objs = []
try:
matched_user = User.objects.get(username=username)
cc_user = cc.User.from_django_user(matched_user)
cc_user.course_id = course_key
cc_user.retrieve(complete=False)
if (cc_user['threads_count'] + cc_user['comments_count']) > 0:
user_objs.append({
'id': matched_user.id,
'username': matched_user.username,
})
except User.DoesNotExist:
pass
return JsonResponse({"users": user_objs})
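    # Illustrative response shapes, derived from the code above (editorial note;
    # the id and username are made-up values):
    #   {"users": [{"id": 42, "username": "some_learner"}]}  -- matched user has forum activity
    #   {"users": []}                                         -- no match or no activity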
| agpl-3.0 |
w1r0x/ansible | test/units/playbook/test_playbook.py | 290 | 2230 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch, MagicMock
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.playbook import Playbook
from ansible.vars import VariableManager
from units.mock.loader import DictDataLoader
class TestPlaybook(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_empty_playbook(self):
fake_loader = DictDataLoader({})
p = Playbook(loader=fake_loader)
def test_basic_playbook(self):
fake_loader = DictDataLoader({
"test_file.yml":"""
- hosts: all
""",
})
p = Playbook.load("test_file.yml", loader=fake_loader)
plays = p.get_plays()
def test_bad_playbook_files(self):
fake_loader = DictDataLoader({
# represents a playbook which is not a list of plays
"bad_list.yml": """
foo: bar
""",
# represents a playbook where a play entry is mis-formatted
"bad_entry.yml": """
-
- "This should be a mapping..."
""",
})
vm = VariableManager()
self.assertRaises(AnsibleParserError, Playbook.load, "bad_list.yml", vm, fake_loader)
self.assertRaises(AnsibleParserError, Playbook.load, "bad_entry.yml", vm, fake_loader)
| gpl-3.0 |
ssudholt/phocnet | tools/predict_phocs.py | 1 | 2673 | #!/usr/bin/env python
'''
Script for predicting PHOCs for a number of images residing in a folder on disk.
'''
import argparse
import logging
import os
import caffe
import numpy as np
import cv2
from phocnet.evaluation.cnn import net_output_for_word_image_list
def main(img_dir, output_dir, pretrained_phocnet, deploy_proto, min_image_width_height, gpu_id):
logging_format = '[%(asctime)-19s, %(name)s, %(levelname)s] %(message)s'
logging.basicConfig(level=logging.INFO,
format=logging_format)
logger = logging.getLogger('Predict PHOCs')
if gpu_id is None:
caffe.set_mode_cpu()
else:
caffe.set_mode_gpu()
caffe.set_device(gpu_id)
logger.info('Loading PHOCNet...')
phocnet = caffe.Net(deploy_proto, caffe.TEST, weights=pretrained_phocnet)
# find all images in the supplied dir
logger.info('Found %d word images to process', len(os.listdir(img_dir)))
word_img_list = [cv2.imread(os.path.join(img_dir, filename), cv2.CV_LOAD_IMAGE_GRAYSCALE)
for filename in sorted(os.listdir(img_dir)) if filename not in ['.', '..']]
# push images through the PHOCNet
logger.info('Predicting PHOCs...')
predicted_phocs = net_output_for_word_image_list(phocnet=phocnet, word_img_list=word_img_list,
min_img_width_height=min_image_width_height)
# save everything
logger.info('Saving...')
np.save(os.path.join(output_dir, 'predicted_phocs.npy'), predicted_phocs)
logger.info('Finished')
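# Example invocation (editorial note; the paths and file names are placeholders,
# not files shipped with the project):
#   python predict_phocs.py --img_dir ./word_images --output_dir ./out \
#       --pretrained_phocnet phocnet.binaryproto --deploy_proto deploy.prototxt --gpu_id 0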
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Predict PHOCs from a pretrained PHOCNet. The PHOCs are saved as Numpy Array to disk.')
parser.add_argument('--min_image_width_height', '-miwh', action='store', type=int, default=26,
help='The minimum image width or height to be passed through the PHOCNet. Default: 26')
parser.add_argument('--output_dir', '-od', action='store', type=str, default='.',
help='The directory where to store the PHOC Numpy Array. Default: .')
parser.add_argument('--img_dir', '-id', action='store', type=str, required=True,
help='All images in this folder are processed in ASCII order of their '+
'respective names. A PHOC is predicted for each.')
parser.add_argument('--pretrained_phocnet', '-pp', action='store', type=str, required=True,
help='Path to a pretrained PHOCNet binaryproto file.')
parser.add_argument('--deploy_proto', '-dp', action='store', type=str, required=True,
help='Path to PHOCNet deploy prototxt file.')
parser.add_argument('--gpu_id', '-gpu', action='store', type=int,
help='The ID of the GPU to use. If not specified, training is run in CPU mode.')
args = vars(parser.parse_args())
main(**args)
| bsd-3-clause |
jrabbit/ubotu-fr | plugins/Anonymous/__init__.py | 18 | 2461 | ###
# Copyright (c) 2005, Daniel DiPaolo
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
"""
Allows folks to talk through the bot anonymously.
"""
import supybot
import supybot.world as world
# Use this for the version of this plugin. You may wish to put a CVS keyword
# in here if you're keeping the plugin in CVS or some similar system.
__version__ = "%%VERSION%%"
# XXX Replace this with an appropriate author or supybot.Author instance.
__author__ = supybot.authors.strike
# This is a dictionary mapping supybot.Author instances to lists of
# contributions.
__contributors__ = {}
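# An illustrative (made-up) entry would look like:
# __contributors__ = {supybot.authors.strike: ['original concept', 'documentation']}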
import config
import plugin
reload(plugin) # In case we're being reloaded.
# Add more reloads here if you add third-party modules and want them to be
# reloaded when this plugin is reloaded. Don't forget to import them as well!
if world.testing:
import test
Class = plugin.Class
configure = config.configure
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| bsd-3-clause |
t3dev/odoo | addons/l10n_be_invoice_bba/models/account_invoice.py | 1 | 1971 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
# Copyright (c) 2011 Noviat nv/sa (www.noviat.be). All rights reserved.
import random
import re
from odoo import api, fields, models, _
from odoo.exceptions import UserError
"""
account.invoice object: add support for Belgian structured communication
"""
class AccountInvoice(models.Model):
_inherit = 'account.invoice'
@api.multi
def _get_reference_be_partner(self):
""" This computes the reference based on the belgian national standard
“OGM-VCS”.
For instance, if an invoice is issued for the partner with internal
reference 'food buyer 654', the digits will be extracted and used as
the data. This will lead to a check number equal to 72 and the
reference will be '+++000/0000/65472+++'.
If no reference is set for the partner, its id in the database will
be used.
"""
self.ensure_one()
bbacomm = (re.sub('\D', '', self.partner_id.ref or '') or str(self.partner_id.id))[-10:].rjust(10, '0')
base = int(bbacomm)
mod = base % 97 or 97
reference = '+++%s/%s/%s%02d+++' % (bbacomm[:3], bbacomm[3:7], bbacomm[7:], mod)
return reference
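    # Illustrative check of the computation above (editorial note, not part of the
    # original module): a partner ref containing the digits 654 gives
    # bbacomm = '0000000654', base = 654 and 654 % 97 = 72, hence '+++000/0000/65472+++'.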
@api.multi
def _get_reference_be_invoice(self):
""" This computes the reference based on the belgian national standard
“OGM-VCS”.
The data of the reference is the database id number of the invoice.
For instance, if an invoice is issued with id 654, the check number
is 72 so the reference will be '+++000/0000/65472+++'.
"""
self.ensure_one()
base = self.id
bbacomm = str(base).rjust(10, '0')
base = int(bbacomm)
mod = base % 97 or 97
reference = '+++%s/%s/%s%02d+++' % (bbacomm[:3], bbacomm[3:7], bbacomm[7:], mod)
return reference
| gpl-3.0 |
zielmicha/couchdb-python | couchdb/mapping.py | 5 | 22390 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2007-2009 Christopher Lenz
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
"""Mapping from raw JSON data structures to Python objects and vice versa.
>>> from couchdb import Server
>>> server = Server()
>>> db = server.create('python-tests')
To define a document mapping, you declare a Python class inherited from
`Document`, and add any number of `Field` attributes:
>>> from datetime import datetime
>>> from couchdb.mapping import Document, TextField, IntegerField, DateTimeField
>>> class Person(Document):
... name = TextField()
... age = IntegerField()
... added = DateTimeField(default=datetime.now)
>>> person = Person(name='John Doe', age=42)
>>> person.store(db) #doctest: +ELLIPSIS
<Person ...>
>>> person.age
42
You can then load the data from the CouchDB server through your `Document`
subclass, and conveniently access all attributes:
>>> person = Person.load(db, person.id)
>>> old_rev = person.rev
>>> person.name
u'John Doe'
>>> person.age
42
>>> person.added #doctest: +ELLIPSIS
datetime.datetime(...)
To update a document, simply set the attributes, and then call the ``store()``
method:
>>> person.name = 'John R. Doe'
>>> person.store(db) #doctest: +ELLIPSIS
<Person ...>
If you retrieve the document from the server again, you should be getting the
updated data:
>>> person = Person.load(db, person.id)
>>> person.name
u'John R. Doe'
>>> person.rev != old_rev
True
>>> del server['python-tests']
"""
import copy
from calendar import timegm
from datetime import date, datetime, time
from decimal import Decimal
from time import strptime, struct_time
from couchdb.design import ViewDefinition
__all__ = ['Mapping', 'Document', 'Field', 'TextField', 'FloatField',
'IntegerField', 'LongField', 'BooleanField', 'DecimalField',
'DateField', 'DateTimeField', 'TimeField', 'DictField', 'ListField',
'ViewField']
__docformat__ = 'restructuredtext en'
DEFAULT = object()
class Field(object):
"""Basic unit for mapping a piece of data between Python and JSON.
Instances of this class can be added to subclasses of `Document` to describe
the mapping of a document.
"""
def __init__(self, name=None, default=None):
self.name = name
self.default = default
def __get__(self, instance, owner):
if instance is None:
return self
value = instance._data.get(self.name)
if value is not None:
value = self._to_python(value)
elif self.default is not None:
default = self.default
if callable(default):
default = default()
value = default
return value
def __set__(self, instance, value):
if value is not None:
value = self._to_json(value)
instance._data[self.name] = value
def _to_python(self, value):
return unicode(value)
def _to_json(self, value):
return self._to_python(value)
class MappingMeta(type):
def __new__(cls, name, bases, d):
fields = {}
for base in bases:
if hasattr(base, '_fields'):
fields.update(base._fields)
for attrname, attrval in d.items():
if isinstance(attrval, Field):
if not attrval.name:
attrval.name = attrname
fields[attrname] = attrval
d['_fields'] = fields
return type.__new__(cls, name, bases, d)
class Mapping(object):
__metaclass__ = MappingMeta
def __init__(self, **values):
self._data = {}
for attrname, field in self._fields.items():
if attrname in values:
setattr(self, attrname, values.pop(attrname))
else:
setattr(self, attrname, getattr(self, attrname))
def __iter__(self):
return iter(self._data)
def __len__(self):
return len(self._data or ())
def __delitem__(self, name):
del self._data[name]
def __getitem__(self, name):
return self._data[name]
def __setitem__(self, name, value):
self._data[name] = value
def get(self, name, default=None):
return self._data.get(name, default)
def setdefault(self, name, default):
return self._data.setdefault(name, default)
def unwrap(self):
return self._data
@classmethod
def build(cls, **d):
fields = {}
for attrname, attrval in d.items():
if not attrval.name:
attrval.name = attrname
fields[attrname] = attrval
d['_fields'] = fields
return type('AnonymousStruct', (cls,), d)
@classmethod
def wrap(cls, data):
instance = cls()
instance._data = data
return instance
def _to_python(self, value):
return self.wrap(value)
def _to_json(self, value):
return self.unwrap()
class ViewField(object):
r"""Descriptor that can be used to bind a view definition to a property of
a `Document` class.
>>> class Person(Document):
... name = TextField()
... age = IntegerField()
... by_name = ViewField('people', '''\
... function(doc) {
... emit(doc.name, doc);
... }''')
>>> Person.by_name
<ViewDefinition '_design/people/_view/by_name'>
>>> print Person.by_name.map_fun
function(doc) {
emit(doc.name, doc);
}
That property can be used as a function, which will execute the view.
>>> from couchdb import Database
>>> db = Database('python-tests')
>>> Person.by_name(db, count=3)
<ViewResults <PermanentView '_design/people/_view/by_name'> {'count': 3}>
The results produced by the view are automatically wrapped in the
`Document` subclass the descriptor is bound to. In this example, it would
return instances of the `Person` class. But please note that this requires
the values of the view results to be dictionaries that can be mapped to the
mapping defined by the containing `Document` class. Alternatively, the
``include_docs`` query option can be used to inline the actual documents in
the view results, which will then be used instead of the values.
If you use Python view functions, this class can also be used as a
decorator:
>>> class Person(Document):
... name = TextField()
... age = IntegerField()
...
... @ViewField.define('people')
... def by_name(doc):
... yield doc['name'], doc
>>> Person.by_name
<ViewDefinition '_design/people/_view/by_name'>
>>> print Person.by_name.map_fun
def by_name(doc):
yield doc['name'], doc
"""
def __init__(self, design, map_fun, reduce_fun=None, name=None,
language='javascript', wrapper=DEFAULT, **defaults):
"""Initialize the view descriptor.
:param design: the name of the design document
:param map_fun: the map function code
:param reduce_fun: the reduce function code (optional)
:param name: the actual name of the view in the design document, if
it differs from the name the descriptor is assigned to
:param language: the name of the language used
:param wrapper: an optional callable that should be used to wrap the
result rows
:param defaults: default query string parameters to apply
"""
self.design = design
self.name = name
self.map_fun = map_fun
self.reduce_fun = reduce_fun
self.language = language
self.wrapper = wrapper
self.defaults = defaults
@classmethod
def define(cls, design, name=None, language='python', wrapper=DEFAULT,
**defaults):
"""Factory method for use as a decorator (only suitable for Python
view code).
"""
def view_wrapped(fun):
return cls(design, fun, language=language, wrapper=wrapper,
**defaults)
return view_wrapped
def __get__(self, instance, cls=None):
if self.wrapper is DEFAULT:
wrapper = cls._wrap_row
else:
wrapper = self.wrapper
return ViewDefinition(self.design, self.name, self.map_fun,
self.reduce_fun, language=self.language,
wrapper=wrapper, **self.defaults)
class DocumentMeta(MappingMeta):
def __new__(cls, name, bases, d):
for attrname, attrval in d.items():
if isinstance(attrval, ViewField):
if not attrval.name:
attrval.name = attrname
return MappingMeta.__new__(cls, name, bases, d)
class Document(Mapping):
__metaclass__ = DocumentMeta
def __init__(self, id=None, **values):
Mapping.__init__(self, **values)
if id is not None:
self.id = id
def __repr__(self):
return '<%s %r@%r %r>' % (type(self).__name__, self.id, self.rev,
dict([(k, v) for k, v in self._data.items()
if k not in ('_id', '_rev')]))
def _get_id(self):
if hasattr(self._data, 'id'): # When data is client.Document
return self._data.id
return self._data.get('_id')
def _set_id(self, value):
if self.id is not None:
raise AttributeError('id can only be set on new documents')
self._data['_id'] = value
id = property(_get_id, _set_id, doc='The document ID')
@property
def rev(self):
"""The document revision.
:rtype: basestring
"""
if hasattr(self._data, 'rev'): # When data is client.Document
return self._data.rev
return self._data.get('_rev')
def items(self):
"""Return the fields as a list of ``(name, value)`` tuples.
This method is provided to enable easy conversion to native dictionary
objects, for example to allow use of `mapping.Document` instances with
`client.Database.update`.
>>> class Post(Document):
... title = TextField()
... author = TextField()
>>> post = Post(id='foo-bar', title='Foo bar', author='Joe')
>>> sorted(post.items())
[('_id', 'foo-bar'), ('author', u'Joe'), ('title', u'Foo bar')]
:return: a list of ``(name, value)`` tuples
"""
retval = []
if self.id is not None:
retval.append(('_id', self.id))
if self.rev is not None:
retval.append(('_rev', self.rev))
for name, value in self._data.items():
if name not in ('_id', '_rev'):
retval.append((name, value))
return retval
@classmethod
def load(cls, db, id):
"""Load a specific document from the given database.
:param db: the `Database` object to retrieve the document from
:param id: the document ID
:return: the `Document` instance, or `None` if no document with the
given ID was found
"""
doc = db.get(id)
if doc is None:
return None
return cls.wrap(doc)
def store(self, db):
"""Store the document in the given database."""
db.save(self._data)
return self
@classmethod
def query(cls, db, map_fun, reduce_fun, language='javascript', **options):
"""Execute a CouchDB temporary view and map the result values back to
objects of this mapping.
Note that by default, any properties of the document that are not
included in the values of the view will be treated as if they were
missing from the document. If you want to load the full document for
every row, set the ``include_docs`` option to ``True``.
"""
return db.query(map_fun, reduce_fun=reduce_fun, language=language,
wrapper=cls._wrap_row, **options)
@classmethod
def view(cls, db, viewname, **options):
"""Execute a CouchDB named view and map the result values back to
objects of this mapping.
Note that by default, any properties of the document that are not
included in the values of the view will be treated as if they were
missing from the document. If you want to load the full document for
every row, set the ``include_docs`` option to ``True``.
"""
return db.view(viewname, wrapper=cls._wrap_row, **options)
@classmethod
def _wrap_row(cls, row):
doc = row.get('doc')
if doc is not None:
return cls.wrap(doc)
data = row['value']
data['_id'] = row['id']
return cls.wrap(data)
class TextField(Field):
"""Mapping field for string values."""
_to_python = unicode
class FloatField(Field):
"""Mapping field for float values."""
_to_python = float
class IntegerField(Field):
"""Mapping field for integer values."""
_to_python = int
class LongField(Field):
"""Mapping field for long integer values."""
_to_python = long
class BooleanField(Field):
"""Mapping field for boolean values."""
_to_python = bool
class DecimalField(Field):
"""Mapping field for decimal values."""
def _to_python(self, value):
return Decimal(value)
def _to_json(self, value):
return unicode(value)
class DateField(Field):
"""Mapping field for storing dates.
>>> field = DateField()
>>> field._to_python('2007-04-01')
datetime.date(2007, 4, 1)
>>> field._to_json(date(2007, 4, 1))
'2007-04-01'
>>> field._to_json(datetime(2007, 4, 1, 15, 30))
'2007-04-01'
"""
def _to_python(self, value):
if isinstance(value, basestring):
try:
value = date(*strptime(value, '%Y-%m-%d')[:3])
except ValueError:
raise ValueError('Invalid ISO date %r' % value)
return value
def _to_json(self, value):
if isinstance(value, datetime):
value = value.date()
return value.isoformat()
class DateTimeField(Field):
"""Mapping field for storing date/time values.
>>> field = DateTimeField()
>>> field._to_python('2007-04-01T15:30:00Z')
datetime.datetime(2007, 4, 1, 15, 30)
>>> field._to_json(datetime(2007, 4, 1, 15, 30, 0, 9876))
'2007-04-01T15:30:00Z'
>>> field._to_json(date(2007, 4, 1))
'2007-04-01T00:00:00Z'
"""
def _to_python(self, value):
if isinstance(value, basestring):
try:
value = value.split('.', 1)[0] # strip out microseconds
value = value.rstrip('Z') # remove timezone separator
value = datetime(*strptime(value, '%Y-%m-%dT%H:%M:%S')[:6])
except ValueError:
raise ValueError('Invalid ISO date/time %r' % value)
return value
def _to_json(self, value):
if isinstance(value, struct_time):
value = datetime.utcfromtimestamp(timegm(value))
elif not isinstance(value, datetime):
value = datetime.combine(value, time(0))
return value.replace(microsecond=0).isoformat() + 'Z'
class TimeField(Field):
"""Mapping field for storing times.
>>> field = TimeField()
>>> field._to_python('15:30:00')
datetime.time(15, 30)
>>> field._to_json(time(15, 30))
'15:30:00'
>>> field._to_json(datetime(2007, 4, 1, 15, 30))
'15:30:00'
"""
def _to_python(self, value):
if isinstance(value, basestring):
try:
value = value.split('.', 1)[0] # strip out microseconds
value = time(*strptime(value, '%H:%M:%S')[3:6])
except ValueError:
raise ValueError('Invalid ISO time %r' % value)
return value
def _to_json(self, value):
if isinstance(value, datetime):
value = value.time()
return value.replace(microsecond=0).isoformat()
class DictField(Field):
"""Field type for nested dictionaries.
>>> from couchdb import Server
>>> server = Server()
>>> db = server.create('python-tests')
>>> class Post(Document):
... title = TextField()
... content = TextField()
... author = DictField(Mapping.build(
... name = TextField(),
... email = TextField()
... ))
... extra = DictField()
>>> post = Post(
... title='Foo bar',
... author=dict(name='John Doe',
... email='[email protected]'),
... extra=dict(foo='bar'),
... )
>>> post.store(db) #doctest: +ELLIPSIS
<Post ...>
>>> post = Post.load(db, post.id)
>>> post.author.name
u'John Doe'
>>> post.author.email
u'[email protected]'
>>> post.extra
{'foo': 'bar'}
>>> del server['python-tests']
"""
def __init__(self, mapping=None, name=None, default=None):
default = default or {}
Field.__init__(self, name=name, default=lambda: default.copy())
self.mapping = mapping
def _to_python(self, value):
if self.mapping is None:
return value
else:
return self.mapping.wrap(value)
def _to_json(self, value):
if self.mapping is None:
return value
if not isinstance(value, Mapping):
value = self.mapping(**value)
return value.unwrap()
class ListField(Field):
"""Field type for sequences of other fields.
>>> from couchdb import Server
>>> server = Server()
>>> db = server.create('python-tests')
>>> class Post(Document):
... title = TextField()
... content = TextField()
... pubdate = DateTimeField(default=datetime.now)
... comments = ListField(DictField(Mapping.build(
... author = TextField(),
... content = TextField(),
... time = DateTimeField()
... )))
>>> post = Post(title='Foo bar')
>>> post.comments.append(author='myself', content='Bla bla',
... time=datetime.now())
>>> len(post.comments)
1
>>> post.store(db) #doctest: +ELLIPSIS
<Post ...>
>>> post = Post.load(db, post.id)
>>> comment = post.comments[0]
>>> comment['author']
'myself'
>>> comment['content']
'Bla bla'
>>> comment['time'] #doctest: +ELLIPSIS
'...T...Z'
>>> del server['python-tests']
"""
def __init__(self, field, name=None, default=None):
default = default or []
Field.__init__(self, name=name, default=lambda: copy.copy(default))
if type(field) is type:
if issubclass(field, Field):
field = field()
elif issubclass(field, Mapping):
field = DictField(field)
self.field = field
def _to_python(self, value):
return self.Proxy(value, self.field)
def _to_json(self, value):
return [self.field._to_json(item) for item in value]
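# Proxy (below) wraps the raw JSON-compatible list stored on the document:
# items are converted with field._to_python on read and field._to_json on
# write, so callers work with Python values while the underlying list stays
# JSON-serializable.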
class Proxy(list):
def __init__(self, list, field):
self.list = list
self.field = field
def __lt__(self, other):
return self.list < other
def __le__(self, other):
return self.list <= other
def __eq__(self, other):
return self.list == other
def __ne__(self, other):
return self.list != other
def __gt__(self, other):
return self.list > other
def __ge__(self, other):
return self.list >= other
def __repr__(self):
return repr(self.list)
def __str__(self):
return str(self.list)
def __unicode__(self):
return unicode(self.list)
def __delitem__(self, index):
del self.list[index]
def __getitem__(self, index):
return self.field._to_python(self.list[index])
def __setitem__(self, index, value):
self.list[index] = self.field._to_json(value)
def __delslice__(self, i, j):
del self.list[i:j]
def __getslice__(self, i, j):
return ListField.Proxy(self.list[i:j], self.field)
def __setslice__(self, i, j, seq):
self.list[i:j] = (self.field._to_json(v) for v in seq)
def __contains__(self, value):
for item in self.list:
if self.field._to_python(item) == value:
return True
return False
def __iter__(self):
for index in range(len(self)):
yield self[index]
def __len__(self):
return len(self.list)
def __nonzero__(self):
return bool(self.list)
def append(self, *args, **kwargs):
if args or not isinstance(self.field, DictField):
if len(args) != 1:
raise TypeError('append() takes exactly one argument '
'(%s given)' % len(args))
value = args[0]
else:
value = kwargs
self.list.append(self.field._to_json(value))
def count(self, value):
return [i for i in self].count(value)
def extend(self, list):
for item in list:
self.append(item)
def index(self, value):
return self.list.index(self.field._to_json(value))
def insert(self, idx, *args, **kwargs):
if args or not isinstance(self.field, DictField):
if len(args) != 1:
raise TypeError('insert() takes exactly 2 arguments '
'(%s given)' % len(args))
value = args[0]
else:
value = kwargs
self.list.insert(idx, self.field._to_json(value))
def remove(self, value):
return self.list.remove(self.field._to_json(value))
def pop(self, *args):
return self.field._to_python(self.list.pop(*args))
| bsd-3-clause |
simar7/build-mozharness | mozharness/mozilla/mock.py | 11 | 10709 | #!/usr/bin/env python
# ***** BEGIN LICENSE BLOCK *****
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
# ***** END LICENSE BLOCK *****
"""Code to integrate with mock
"""
import os.path
import hashlib
import subprocess
import os
ERROR_MSGS = {
'undetermined_buildroot_lock': 'buildroot_lock_path does not exist. '
'Nothing to remove.'
}
# MockMixin {{{1
class MockMixin(object):
"""Provides methods to setup and interact with mock environments.
https://wiki.mozilla.org/ReleaseEngineering/Applications/Mock
This is dependent on ScriptMixin
"""
done_mock_setup = False
mock_enabled = False
default_mock_target = None
def init_mock(self, mock_target):
"Initialize mock environment defined by `mock_target`"
cmd = ['mock_mozilla', '-r', mock_target, '--init']
return super(MockMixin, self).run_command(cmd, halt_on_failure=True,
fatal_exit_code=3)
def install_mock_packages(self, mock_target, packages):
"Install `packages` into mock environment `mock_target`"
cmd = ['mock_mozilla', '-r', mock_target, '--install'] + packages
# TODO: parse output to see if packages actually were installed
return super(MockMixin, self).run_command(cmd, halt_on_failure=True,
fatal_exit_code=3)
def delete_mock_files(self, mock_target, files):
"""Delete files from the mock environment `mock_target`. `files` should
be an iterable of 2-tuples: (src, dst). Only the dst component is
deleted."""
cmd_base = ['mock_mozilla', '-r', mock_target, '--shell']
for src, dest in files:
cmd = cmd_base + ['rm -rf %s' % dest]
super(MockMixin, self).run_command(cmd, halt_on_failure=True,
fatal_exit_code=3)
def copy_mock_files(self, mock_target, files):
"""Copy files into the mock environment `mock_target`. `files` should
be an iterable of 2-tuples: (src, dst)"""
cmd_base = ['mock_mozilla', '-r', mock_target, '--copyin', '--unpriv']
for src, dest in files:
cmd = cmd_base + [src, dest]
super(MockMixin, self).run_command(cmd, halt_on_failure=True,
fatal_exit_code=3)
super(MockMixin, self).run_command(
['mock_mozilla', '-r', mock_target, '--shell',
'chown -R mock_mozilla %s' % dest],
halt_on_failure=True,
fatal_exit_code=3)
def get_mock_target(self):
if self.config.get('disable_mock'):
return None
return self.default_mock_target or self.config.get('mock_target')
def enable_mock(self):
"""Wrap self.run_command and self.get_output_from_command to run inside
the mock environment given by self.config['mock_target']"""
if not self.get_mock_target():
return
self.mock_enabled = True
self.run_command = self.run_command_m
self.get_output_from_command = self.get_output_from_command_m
def disable_mock(self):
"""Restore self.run_command and self.get_output_from_command to their
original versions. This is the opposite of self.enable_mock()"""
if not self.get_mock_target():
return
self.mock_enabled = False
self.run_command = super(MockMixin, self).run_command
self.get_output_from_command = super(MockMixin, self).get_output_from_command
def _do_mock_command(self, func, mock_target, command, cwd=None, env=None, **kwargs):
"""Internal helper for preparing commands to run under mock. Used by
run_mock_command and get_mock_output_from_command."""
cmd = ['mock_mozilla', '-r', mock_target, '-q']
if cwd:
cmd += ['--cwd', cwd]
if not kwargs.get('privileged'):
cmd += ['--unpriv']
cmd += ['--shell']
if not isinstance(command, basestring):
command = subprocess.list2cmdline(command)
# XXX - Hack - gets around AB_CD=%(locale)s type arguments
command = command.replace("(", "\\(")
command = command.replace(")", "\\)")
if env:
env_cmd = ['/usr/bin/env']
for key, value in env.items():
# $HOME won't work inside the mock chroot
if key == 'HOME':
continue
value = value.replace(";", "\\;")
env_cmd += ['%s=%s' % (key, value)]
cmd.append(subprocess.list2cmdline(env_cmd) + " " + command)
else:
cmd.append(command)
return func(cmd, cwd=cwd, **kwargs)
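# Illustrative call (target name, command and env are hypothetical): given
#   self._do_mock_command(func, 'mozilla-centos6-x86_64', ['make', 'build'],
#                         env={'PATH': '/usr/bin'})
# the assembled command is roughly
#   ['mock_mozilla', '-r', 'mozilla-centos6-x86_64', '-q', '--unpriv',
#    '--shell', '/usr/bin/env PATH=/usr/bin make build']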
def run_mock_command(self, mock_target, command, cwd=None, env=None, **kwargs):
"""Same as ScriptMixin.run_command, except runs command inside mock
environment `mock_target`."""
return self._do_mock_command(
super(MockMixin, self).run_command,
mock_target, command, cwd, env, **kwargs)
def get_mock_output_from_command(self, mock_target, command, cwd=None, env=None, **kwargs):
"""Same as ScriptMixin.get_output_from_command, except runs command
inside mock environment `mock_target`."""
return self._do_mock_command(
super(MockMixin, self).get_output_from_command,
mock_target, command, cwd, env, **kwargs)
def reset_mock(self, mock_target=None):
"""rm mock lock and reset"""
c = self.config
if mock_target is None:
if not c.get('mock_target'):
self.fatal("Could not determine: 'mock_target'")
mock_target = c.get('mock_target')
buildroot_lock_path = os.path.join(c.get('mock_mozilla_dir', ''),
mock_target,
'buildroot.lock')
self.info("Removing buildroot lock at the following path, if it exists:")
self.info(buildroot_lock_path)
if not os.path.exists(buildroot_lock_path):
self.info(ERROR_MSGS['undetermined_buildroot_lock'])
else:
rm_lock_cmd = ['rm', '-f', buildroot_lock_path]
super(MockMixin, self).run_command(rm_lock_cmd,
halt_on_failure=True,
fatal_exit_code=3)
cmd = ['mock_mozilla', '-r', mock_target, '--orphanskill']
return super(MockMixin, self).run_command(cmd, halt_on_failure=True,
fatal_exit_code=3)
def setup_mock(self, mock_target=None, mock_packages=None, mock_files=None):
"""Initializes and installs packages, copies files into mock
environment given by configuration in self.config. The mock
environment is given by self.config['mock_target'], the list of packages
to install given by self.config['mock_packages'], and the list of files
to copy in is self.config['mock_files']."""
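# A minimal illustrative config (values are hypothetical; the keys are the
# ones named in the docstring above) might look like:
#   'mock_target': 'mozilla-centos6-x86_64',
#   'mock_packages': ['autoconf213', 'mozilla-python27'],
#   'mock_files': [('/home/cltbld/.ssh', '/home/mock_mozilla/.ssh')]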
if self.done_mock_setup or self.config.get('disable_mock'):
return
c = self.config
if mock_target is None:
assert 'mock_target' in c
t = c['mock_target']
else:
t = mock_target
self.default_mock_target = t
# Don't re-initialize mock if we're using the same packages as before
# Put the cache inside the mock root so that if somebody else resets
# the environment, it invalidates the cache
mock_root = super(MockMixin, self).get_output_from_command(
['mock_mozilla', '-r', t, '--print-root-path']
)
package_hash_file = os.path.join(mock_root, "builds/package_list.hash")
if os.path.exists(package_hash_file):
old_packages_hash = self.read_from_file(package_hash_file)
self.info("old package hash: %s" % old_packages_hash)
else:
self.info("no previous package list found")
old_packages_hash = None
if mock_packages is None:
mock_packages = list(c.get('mock_packages'))
package_list_hash = hashlib.new('sha1')
if mock_packages:
for p in sorted(mock_packages):
package_list_hash.update(p)
package_list_hash = package_list_hash.hexdigest()
did_init = True
# This simple hash comparison doesn't take into account dependency
# changes. If you really care about dependencies, then they should be
# explicitly listed in the package list.
if old_packages_hash != package_list_hash:
self.init_mock(t)
else:
self.info("Our list of packages hasn't changed; skipping re-initialization")
did_init = False
# Still try and install packages here since the package version may
# have been updated on the server
if mock_packages:
self.install_mock_packages(t, mock_packages)
# Save our list of packages
self.write_to_file(package_hash_file,
package_list_hash)
if mock_files is None:
mock_files = list(c.get('mock_files'))
if mock_files:
if not did_init:
# mock complains if you try and copy in files that already
# exist, so we need to delete them here first
self.info("Deleting old mock files")
self.delete_mock_files(t, mock_files)
self.copy_mock_files(t, mock_files)
self.done_mock_setup = True
def run_command_m(self, *args, **kwargs):
"""Executes self.run_mock_command if we have a mock target set,
otherwise executes self.run_command."""
mock_target = self.get_mock_target()
if mock_target:
self.setup_mock()
return self.run_mock_command(mock_target, *args, **kwargs)
else:
return super(MockMixin, self).run_command(*args, **kwargs)
def get_output_from_command_m(self, *args, **kwargs):
"""Executes self.get_mock_output_from_command if we have a mock target
set, otherwise executes self.get_output_from_command."""
mock_target = self.get_mock_target()
if mock_target:
self.setup_mock()
return self.get_mock_output_from_command(mock_target, *args, **kwargs)
else:
return super(MockMixin, self).get_output_from_command(*args, **kwargs)
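# Rough usage sketch (BuildScript, BaseScript and the command are assumed, not
# taken from this module): a script mixes MockMixin in next to its
# ScriptMixin-based base class and calls the *_m wrappers, which run inside
# mock when a mock_target is configured and fall back to the plain versions
# otherwise.
#
#   class BuildScript(MockMixin, BaseScript):
#       def build(self):
#           self.run_command_m(['make', 'build'], cwd='/builds/source')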
| mpl-2.0 |
DenisCarriere/geocoder | tests/test_uscensus.py | 1 | 1697 | #!/usr/bin/python
# coding: utf8
import geocoder
import requests_mock
us_address = '595 Market St'
us_city = 'San Francisco'
us_state = 'CA'
us_zipcode = '94105'
us_locations = ['4650 Silver Hill Road, Suitland, MD 20746', '42 Chapel Street, New Haven']
def test_uscensus():
url = 'https://geocoding.geo.census.gov/geocoder/locations/onelineaddress?address=595+Market+St+San+Francisco+CA+94105&benchmark=4&format=json'
data_file = 'tests/results/uscensus.json'
with requests_mock.Mocker() as mocker, open(data_file, 'r') as input:
mocker.get(url, text=input.read())
g = geocoder.uscensus(' '.join([us_address, us_city, us_state, us_zipcode]), timeout=10)
assert g.ok
def test_uscensus_reverse():
url = 'https://geocoding.geo.census.gov/geocoder/geographies/coordinates?x=-77.016389&y=38.904722&benchmark=4&vintage=4&format=json'
data_file = 'tests/results/uscensus_reverse.json'
with requests_mock.Mocker() as mocker, open(data_file, 'r') as input:
mocker.get(url, text=input.read())
g = geocoder.uscensus((38.904722, -77.016389), method='reverse', timeout=10)
assert g.ok
def test_uscensus_batch():
url = 'https://geocoding.geo.census.gov/geocoder/locations/addressbatch'
data_file = 'tests/results/uscensus_batch.csv'
with requests_mock.Mocker() as mocker, open(data_file, 'r') as input:
mocker.post(url, text=input.read())
g = geocoder.uscensus(us_locations, benchmark=9, method='batch')
assert g.ok
expected_results = [
[38.846638, -76.92681],
[41.30435, -72.89422]
]
assert [result.latlng for result in g] == expected_results
| mit |
sysadmin75/ansible | lib/ansible/module_utils/common/network.py | 41 | 4188 | # Copyright (c) 2016 Red Hat Inc
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
# General networking tools that may be used by all modules
import re
from struct import pack
from socket import inet_ntoa
from ansible.module_utils.six.moves import zip
VALID_MASKS = [2**8 - 2**i for i in range(0, 9)]
def is_netmask(val):
parts = str(val).split('.')
if not len(parts) == 4:
return False
for part in parts:
try:
if int(part) not in VALID_MASKS:
raise ValueError
except ValueError:
return False
return True
def is_masklen(val):
try:
return 0 <= int(val) <= 32
except ValueError:
return False
def to_netmask(val):
""" converts a masklen to a netmask """
if not is_masklen(val):
raise ValueError('invalid value for masklen')
bits = 0
for i in range(32 - int(val), 32):
bits |= (1 << i)
return inet_ntoa(pack('>I', bits))
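# e.g. to_netmask(24) -> '255.255.255.0' and to_netmask('8') -> '255.0.0.0'
# (illustrative values, derived from the bit arithmetic above)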
def to_masklen(val):
""" converts a netmask to a masklen """
if not is_netmask(val):
raise ValueError('invalid value for netmask: %s' % val)
bits = list()
for x in val.split('.'):
octet = bin(int(x)).count('1')
bits.append(octet)
return sum(bits)
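# e.g. to_masklen('255.255.255.0') -> 24 (illustrative value, the inverse of
# to_netmask above)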
def to_subnet(addr, mask, dotted_notation=False):
""" converts an addr / mask pair to a subnet in cidr notation """
try:
if not is_masklen(mask):
raise ValueError
cidr = int(mask)
mask = to_netmask(mask)
except ValueError:
cidr = to_masklen(mask)
addr = addr.split('.')
mask = mask.split('.')
network = list()
for s_addr, s_mask in zip(addr, mask):
network.append(str(int(s_addr) & int(s_mask)))
if dotted_notation:
return '%s %s' % ('.'.join(network), to_netmask(cidr))
return '%s/%s' % ('.'.join(network), cidr)
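# e.g. to_subnet('192.168.1.17', 24) -> '192.168.1.0/24', and with
# dotted_notation=True -> '192.168.1.0 255.255.255.0' (illustrative values)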
def to_ipv6_subnet(addr):
""" IPv6 addresses are eight groupings. The first four groupings (64 bits) comprise the subnet address. """
# https://tools.ietf.org/rfc/rfc2374.txt
# Split by :: to identify omitted zeros
ipv6_prefix = addr.split('::')[0]
# Get the first four groups, or as many as are found + ::
found_groups = []
for group in ipv6_prefix.split(':'):
found_groups.append(group)
if len(found_groups) == 4:
break
if len(found_groups) < 4:
found_groups.append('::')
# Concatenate network address parts
network_addr = ''
for group in found_groups:
if group != '::':
network_addr += str(group)
network_addr += str(':')
# Ensure network address ends with ::
if not network_addr.endswith('::'):
network_addr += str(':')
return network_addr
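# e.g. to_ipv6_subnet('2001:db8:1234:5678::1') -> '2001:db8:1234:5678::'
# (illustrative address)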
def to_ipv6_network(addr):
""" IPv6 addresses are eight groupings. The first three groupings (48 bits) comprise the network address. """
# Split by :: to identify omitted zeros
ipv6_prefix = addr.split('::')[0]
# Get the first three groups, or as many as are found + ::
found_groups = []
for group in ipv6_prefix.split(':'):
found_groups.append(group)
if len(found_groups) == 3:
break
if len(found_groups) < 3:
found_groups.append('::')
# Concatenate network address parts
network_addr = ''
for group in found_groups:
if group != '::':
network_addr += str(group)
network_addr += str(':')
# Ensure network address ends with ::
if not network_addr.endswith('::'):
network_addr += str(':')
return network_addr
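# e.g. to_ipv6_network('2001:db8:1234:5678::1') -> '2001:db8:1234::'
# (illustrative address)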
def to_bits(val):
""" converts a netmask to bits """
bits = ''
for octet in val.split('.'):
bits += bin(int(octet))[2:].zfill(8)
return bits
def is_mac(mac_address):
"""
Validate MAC address for given string
Args:
mac_address: string to validate as MAC address
Returns: (Boolean) True if string is valid MAC address, otherwise False
"""
mac_addr_regex = re.compile('[0-9a-f]{2}([-:])[0-9a-f]{2}(\\1[0-9a-f]{2}){4}$')
return bool(mac_addr_regex.match(mac_address.lower()))
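# e.g. is_mac('00:1B:44:11:3A:B7') -> True and is_mac('00-1b-44-11-3a-b7') -> True,
# while a mixed-delimiter value like '00:1b-44:11:3a:b7' -> False (illustrative
# addresses; the backreference forces a single consistent delimiter)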
| gpl-3.0 |
ArcherSys/ArcherSys | Lib/site-packages/nbconvert/filters/tests/test_ansi.py | 3 | 3329 | """
Module with tests for ansi filters
"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
from nbconvert.utils.coloransi import TermColors
from ...tests.base import TestsBase
from ..ansi import strip_ansi, ansi2html, ansi2latex
class TestAnsi(TestsBase):
"""Contains test functions for ansi.py"""
def test_strip_ansi(self):
"""strip_ansi test"""
correct_outputs = {
'%s%s%s' % (TermColors.Green, TermColors.White, TermColors.Red) : '',
'hello%s' % TermColors.Blue: 'hello',
'he%s%sllo' % (TermColors.Yellow, TermColors.Cyan) : 'hello',
'%shello' % TermColors.Blue : 'hello',
'{0}h{0}e{0}l{0}l{0}o{0}'.format(TermColors.Red) : 'hello',
'hel%slo' % TermColors.Green : 'hello',
'hello' : 'hello'}
for inval, outval in correct_outputs.items():
self._try_strip_ansi(inval, outval)
def _try_strip_ansi(self, inval, outval):
self.assertEqual(outval, strip_ansi(inval))
def test_ansi2html(self):
"""ansi2html test"""
correct_outputs = {
'%s' % (TermColors.Red) : '<span class="ansired"></span>',
'hello%s' % TermColors.Blue: 'hello<span class="ansiblue"></span>',
'he%s%sllo' % (TermColors.Green, TermColors.Cyan) : 'he<span class="ansigreen"></span><span class="ansicyan">llo</span>',
'%shello' % TermColors.Yellow : '<span class="ansiyellow">hello</span>',
'{0}h{0}e{0}l{0}l{0}o{0}'.format(TermColors.White) : '<span class="ansigrey">h</span><span class="ansigrey">e</span><span class="ansigrey">l</span><span class="ansigrey">l</span><span class="ansigrey">o</span><span class="ansigrey"></span>',
'hel%slo' % TermColors.Green : 'hel<span class="ansigreen">lo</span>',
'hello' : 'hello'}
for inval, outval in correct_outputs.items():
self._try_ansi2html(inval, outval)
def _try_ansi2html(self, inval, outval):
self.fuzzy_compare(outval, ansi2html(inval))
def test_ansi2latex(self):
"""ansi2latex test"""
correct_outputs = {
'%s' % (TermColors.Red) : r'{\color{red}}',
'hello%s' % TermColors.Blue: r'hello{\color{blue}}',
'he%s%sllo' % (TermColors.Green, TermColors.Cyan) : r'he{\color{green}}{\color{cyan}llo}',
'%shello' % TermColors.Yellow : r'\textbf{\color{yellow}hello}',
'{0}h{0}e{0}l{0}l{0}o{0}'.format(TermColors.White) : r'\textbf{\color{white}h}\textbf{\color{white}e}\textbf{\color{white}l}\textbf{\color{white}l}\textbf{\color{white}o}\textbf{\color{white}}',
'hel%slo' % TermColors.Green : r'hel{\color{green}lo}',
'hello' : 'hello',
u'hello\x1b[34mthere\x1b[mworld' : u'hello{\\color{blue}there}world',
u'hello\x1b[mthere': u'hellothere',
u'hello\x1b[01;34mthere' : u"hello\\textbf{\\color{lightblue}there}",
u'hello\x1b[001;34mthere' : u"hello\\textbf{\\color{lightblue}there}"
}
for inval, outval in correct_outputs.items():
self._try_ansi2latex(inval, outval)
def _try_ansi2latex(self, inval, outval):
self.fuzzy_compare(outval, ansi2latex(inval), case_sensitive=True)
| mit |
VaneCloud/horizon | openstack_dashboard/dashboards/admin/volumes/volume_types/qos_specs/forms.py | 63 | 3022 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard import api
class CreateKeyValuePair(forms.SelfHandlingForm):
# this is for creating a spec key-value pair for an existing QOS Spec
key = forms.CharField(max_length=255, label=_("Key"))
value = forms.CharField(max_length=255, label=_("Value"))
def handle(self, request, data):
qos_spec_id = self.initial['qos_spec_id']
try:
# first retrieve current value of specs
specs = api.cinder.qos_spec_get(request, qos_spec_id)
# now add new key-value pair to list of specs
specs.specs[data['key']] = data['value']
api.cinder.qos_spec_set_keys(request,
qos_spec_id,
specs.specs)
msg = _('Created spec "%s".') % data['key']
messages.success(request, msg)
return True
except Exception:
redirect = reverse("horizon:admin:volumes:index")
exceptions.handle(request,
_("Unable to create spec."),
redirect=redirect)
class EditKeyValuePair(forms.SelfHandlingForm):
value = forms.CharField(max_length=255, label=_("Value"))
# update the backend with the new qos spec value
def handle(self, request, data):
key = self.initial['key']
qos_spec_id = self.initial['qos_spec_id']
# build up new 'specs' object with all previous values plus new value
try:
# first retrieve current value of specs
specs = api.cinder.qos_spec_get_keys(request,
qos_spec_id,
raw=True)
specs.specs[key] = data['value']
api.cinder.qos_spec_set_keys(request,
qos_spec_id,
specs.specs)
msg = _('Saved spec "%s".') % key
messages.success(request, msg)
return True
except Exception:
redirect = reverse("horizon:admin:volumes:index")
exceptions.handle(request,
_("Unable to edit spec."),
redirect=redirect)
| apache-2.0 |