repo_name (string, 5-100 chars) | path (string, 4-299 chars) | copies (class, 990 values) | size (string, 4-7 chars) | content (string, 666-1.03M chars) | license (class, 15 values) | hash (int64, -9,223,351,895,964,839,000 to 9,223,297,778B) | line_mean (float64, 3.17-100) | line_max (int64, 7-1k) | alpha_frac (float64, 0.25-0.98) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
jpshort/odoo | comunity_modules/attachment_preview/model/ir_attachment.py | 4 | 2843 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2014 Therp BV (<http://therp.nl>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import collections
import os.path
import mimetypes
import base64
from openerp.osv.orm import Model
class IrAttachment(Model):
_inherit = 'ir.attachment'
def get_binary_extension(
self, cr, uid, model, ids, binary_field, filename_field=None,
context=None):
result = {}
for this in self.pool[model].browse(
cr, uid,
ids if isinstance(ids, collections.Iterable) else [ids],
context=context):
if not this.id:
result[this.id] = False
continue
extension = ''
if filename_field and this[filename_field]:
filename, extension = os.path.splitext(this[filename_field])
if not this[binary_field]:
result[this.id] = False
continue
if not extension:
try:
import magic
ms = magic.open(
hasattr(magic, 'MAGIC_MIME_TYPE')
and magic.MAGIC_MIME_TYPE or magic.MAGIC_MIME)
ms.load()
mimetype = ms.buffer(
base64.b64decode(this[binary_field]))
except ImportError:
(mimetype, encoding) = mimetypes.guess_type(
'data:;base64,' + this[binary_field], strict=False)
extension = mimetypes.guess_extension(
mimetype.split(';')[0], strict=False)
result[this.id] = (extension or '').lstrip('.').lower()
return result if isinstance(ids, collections.Iterable) else result[ids]
def get_attachment_extension(self, cr, uid, ids, context=None):
return self.get_binary_extension(
cr, uid, self._name, ids, 'datas', 'datas_fname', context=context)
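# A minimal standalone sketch of the detection cascade used above: it mirrors
# get_binary_extension's magic/mimetypes fallback so the behaviour can be
# exercised outside of OpenERP. The helper name and its use are illustrative
# only and are not part of the original module's API.
def _guess_extension_from_base64(data_b64):
    try:
        import magic
        ms = magic.open(
            hasattr(magic, 'MAGIC_MIME_TYPE')
            and magic.MAGIC_MIME_TYPE or magic.MAGIC_MIME)
        ms.load()
        mimetype = ms.buffer(base64.b64decode(data_b64))
    except ImportError:
        mimetype, _encoding = mimetypes.guess_type(
            'data:;base64,' + data_b64, strict=False)
    extension = mimetypes.guess_extension(
        (mimetype or '').split(';')[0], strict=False)
    return (extension or '').lstrip('.').lower()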
| agpl-3.0 | 139,208,640,833,083,520 | 40.808824 | 79 | 0.553641 | false |
jamestwebber/scipy | scipy/optimize/_dual_annealing.py | 1 | 29742 | # Dual Annealing implementation.
# Copyright (c) 2018 Sylvain Gubian <[email protected]>,
# Yang Xiang <[email protected]>
# Author: Sylvain Gubian, Yang Xiang, PMP S.A.
"""
A Dual Annealing global optimization algorithm
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy.optimize import OptimizeResult
from scipy.optimize import minimize
from scipy.special import gammaln
from scipy._lib._util import check_random_state
__all__ = ['dual_annealing']
class VisitingDistribution(object):
"""
Class used to generate new coordinates based on the distorted
Cauchy-Lorentz distribution. Depending on the steps within the strategy
chain, the class implements the strategy for generating new location
changes.
Parameters
----------
lb : array_like
A 1-D NumPy ndarray containing lower bounds of the generated
        components. Neither NaN nor inf is allowed.
ub : array_like
A 1-D NumPy ndarray containing upper bounds for the generated
        components. Neither NaN nor inf is allowed.
visiting_param : float
Parameter for visiting distribution. Default value is 2.62.
        Higher values give the visiting distribution a heavier tail, which
        makes the algorithm jump to a more distant region.
        The value range is (0, 3]. Its value is fixed for the life of the
        object.
rand_state : `~numpy.random.mtrand.RandomState` object
A `~numpy.random.mtrand.RandomState` object for using the current state
of the created random generator container.
"""
TAIL_LIMIT = 1.e8
MIN_VISIT_BOUND = 1.e-10
def __init__(self, lb, ub, visiting_param, rand_state):
# if you wish to make _visiting_param adjustable during the life of
# the object then _factor2, _factor3, _factor5, _d1, _factor6 will
# have to be dynamically calculated in `visit_fn`. They're factored
# out here so they don't need to be recalculated all the time.
self._visiting_param = visiting_param
self.rand_state = rand_state
self.lower = lb
self.upper = ub
self.bound_range = ub - lb
# these are invariant numbers unless visiting_param changes
self._factor2 = np.exp((4.0 - self._visiting_param) * np.log(
self._visiting_param - 1.0))
self._factor3 = np.exp((2.0 - self._visiting_param) * np.log(2.0)
/ (self._visiting_param - 1.0))
self._factor4_p = np.sqrt(np.pi) * self._factor2 / (self._factor3 * (
3.0 - self._visiting_param))
self._factor5 = 1.0 / (self._visiting_param - 1.0) - 0.5
self._d1 = 2.0 - self._factor5
self._factor6 = np.pi * (1.0 - self._factor5) / np.sin(
np.pi * (1.0 - self._factor5)) / np.exp(gammaln(self._d1))
def visiting(self, x, step, temperature):
""" Based on the step in the strategy chain, new coordinated are
generated by changing all components is the same time or only
one of them, the new values are computed with visit_fn method
"""
dim = x.size
if step < dim:
# Changing all coordinates with a new visiting value
visits = self.visit_fn(temperature, dim)
upper_sample = self.rand_state.random_sample()
lower_sample = self.rand_state.random_sample()
visits[visits > self.TAIL_LIMIT] = self.TAIL_LIMIT * upper_sample
visits[visits < -self.TAIL_LIMIT] = -self.TAIL_LIMIT * lower_sample
x_visit = visits + x
a = x_visit - self.lower
b = np.fmod(a, self.bound_range) + self.bound_range
x_visit = np.fmod(b, self.bound_range) + self.lower
x_visit[np.fabs(
x_visit - self.lower) < self.MIN_VISIT_BOUND] += 1.e-10
else:
# Changing only one coordinate at a time based on strategy
# chain step
x_visit = np.copy(x)
visit = self.visit_fn(temperature, 1)
if visit > self.TAIL_LIMIT:
visit = self.TAIL_LIMIT * self.rand_state.random_sample()
elif visit < -self.TAIL_LIMIT:
visit = -self.TAIL_LIMIT * self.rand_state.random_sample()
index = step - dim
x_visit[index] = visit + x[index]
a = x_visit[index] - self.lower[index]
b = np.fmod(a, self.bound_range[index]) + self.bound_range[index]
x_visit[index] = np.fmod(b, self.bound_range[
index]) + self.lower[index]
if np.fabs(x_visit[index] - self.lower[
index]) < self.MIN_VISIT_BOUND:
x_visit[index] += self.MIN_VISIT_BOUND
return x_visit
def visit_fn(self, temperature, dim):
""" Formula Visita from p. 405 of reference [2] """
x, y = self.rand_state.normal(size=(dim, 2)).T
factor1 = np.exp(np.log(temperature) / (self._visiting_param - 1.0))
factor4 = self._factor4_p * factor1
# sigmax
x *= np.exp(-(self._visiting_param - 1.0) * np.log(
self._factor6 / factor4) / (3.0 - self._visiting_param))
den = np.exp((self._visiting_param - 1.0) * np.log(np.fabs(y)) /
(3.0 - self._visiting_param))
return x / den
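# A minimal usage sketch for the class above; it is illustrative only and the
# bounds, temperature and seed are arbitrary placeholder values.
def _example_visiting_distribution():
    lb, ub = np.array([-5.0, -5.0]), np.array([5.0, 5.0])
    vd = VisitingDistribution(lb, ub, visiting_param=2.62,
                              rand_state=check_random_state(42))
    x = np.zeros(2)
    # step < dim perturbs every coordinate; step >= dim perturbs a single one
    x_all = vd.visiting(x, step=0, temperature=5230.0)
    x_one = vd.visiting(x, step=2, temperature=5230.0)
    return x_all, x_one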
class EnergyState(object):
"""
    Class used to record the energy state. At any time, it knows the
    currently used coordinates and the most recent best location.
Parameters
----------
lower : array_like
        A 1-D NumPy ndarray containing lower bounds for generating the initial
        random components in the `reset` method.
    upper : array_like
        A 1-D NumPy ndarray containing upper bounds for generating the initial
        random components in the `reset` method.
        Neither NaN nor inf is allowed.
callback : callable, ``callback(x, f, context)``, optional
A callback function which will be called for all minima found.
``x`` and ``f`` are the coordinates and function value of the
latest minimum found, and `context` has value in [0, 1, 2]
"""
    # Maximum number of trials for generating a valid starting point
MAX_REINIT_COUNT = 1000
def __init__(self, lower, upper, callback=None):
self.ebest = None
self.current_energy = None
self.current_location = None
self.xbest = None
self.lower = lower
self.upper = upper
self.callback = callback
def reset(self, func_wrapper, rand_state, x0=None):
"""
        Initialize the current location in the search domain. If `x0` is not
provided, a random location within the bounds is generated.
"""
if x0 is None:
self.current_location = self.lower + rand_state.random_sample(
len(self.lower)) * (self.upper - self.lower)
else:
self.current_location = np.copy(x0)
init_error = True
reinit_counter = 0
while init_error:
self.current_energy = func_wrapper.fun(self.current_location)
if self.current_energy is None:
raise ValueError('Objective function is returning None')
if (not np.isfinite(self.current_energy) or np.isnan(
self.current_energy)):
if reinit_counter >= EnergyState.MAX_REINIT_COUNT:
init_error = False
message = (
                        'Stopping algorithm because the objective function '
                        'creates NaN or (+/-) infinity values even when '
                        'trying new random parameters'
)
raise ValueError(message)
self.current_location = self.lower + rand_state.random_sample(
self.lower.size) * (self.upper - self.lower)
reinit_counter += 1
else:
init_error = False
# If first time reset, initialize ebest and xbest
if self.ebest is None and self.xbest is None:
self.ebest = self.current_energy
self.xbest = np.copy(self.current_location)
# Otherwise, we keep them in case of reannealing reset
def update_best(self, e, x, context):
self.ebest = e
self.xbest = np.copy(x)
if self.callback is not None:
val = self.callback(x, e, context)
if val is not None:
if val:
return('Callback function requested to stop early by '
'returning True')
def update_current(self, e, x):
self.current_energy = e
self.current_location = np.copy(x)
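# A minimal usage sketch for EnergyState (illustrative only): it relies on
# ObjectiveFunWrapper, which is defined further down in this module, and uses
# arbitrary placeholder bounds and seed.
def _example_energy_state():
    lower, upper = np.array([-1.0]), np.array([1.0])
    wrapper = ObjectiveFunWrapper(lambda x: float(np.sum(x ** 2)))
    state = EnergyState(lower, upper)
    # Draw a random starting location inside the bounds and record it as the
    # best location/energy seen so far.
    state.reset(wrapper, check_random_state(0))
    return state.xbest, state.ebest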
class StrategyChain(object):
"""
Class that implements within a Markov chain the strategy for location
acceptance and local search decision making.
Parameters
----------
acceptance_param : float
Parameter for acceptance distribution. It is used to control the
probability of acceptance. The lower the acceptance parameter, the
smaller the probability of acceptance. Default value is -5.0 with
a range (-1e4, -5].
visit_dist : VisitingDistribution
Instance of `VisitingDistribution` class.
func_wrapper : ObjectiveFunWrapper
Instance of `ObjectiveFunWrapper` class.
minimizer_wrapper: LocalSearchWrapper
Instance of `LocalSearchWrapper` class.
rand_state : `~numpy.random.mtrand.RandomState` object
A `~numpy.random.mtrand.RandomState` object for using the current state
of the created random generator container.
energy_state: EnergyState
Instance of `EnergyState` class.
"""
def __init__(self, acceptance_param, visit_dist, func_wrapper,
minimizer_wrapper, rand_state, energy_state):
# Local strategy chain minimum energy and location
self.emin = energy_state.current_energy
self.xmin = np.array(energy_state.current_location)
# Global optimizer state
self.energy_state = energy_state
# Acceptance parameter
self.acceptance_param = acceptance_param
# Visiting distribution instance
self.visit_dist = visit_dist
# Wrapper to objective function
self.func_wrapper = func_wrapper
# Wrapper to the local minimizer
self.minimizer_wrapper = minimizer_wrapper
self.not_improved_idx = 0
self.not_improved_max_idx = 1000
self._rand_state = rand_state
self.temperature_step = 0
self.K = 100 * len(energy_state.current_location)
def accept_reject(self, j, e, x_visit):
r = self._rand_state.random_sample()
pqv_temp = (self.acceptance_param - 1.0) * (
e - self.energy_state.current_energy) / (
self.temperature_step + 1.)
if pqv_temp <= 0.:
pqv = 0.
else:
pqv = np.exp(np.log(pqv_temp) / (
1. - self.acceptance_param))
if r <= pqv:
# We accept the new location and update state
self.energy_state.update_current(e, x_visit)
self.xmin = np.copy(self.energy_state.current_location)
# No improvement for a long time
if self.not_improved_idx >= self.not_improved_max_idx:
if j == 0 or self.energy_state.current_energy < self.emin:
self.emin = self.energy_state.current_energy
self.xmin = np.copy(self.energy_state.current_location)
def run(self, step, temperature):
self.temperature_step = temperature / float(step + 1)
self.not_improved_idx += 1
for j in range(self.energy_state.current_location.size * 2):
if j == 0:
if step == 0:
self.energy_state_improved = True
else:
self.energy_state_improved = False
x_visit = self.visit_dist.visiting(
self.energy_state.current_location, j, temperature)
# Calling the objective function
e = self.func_wrapper.fun(x_visit)
if e < self.energy_state.current_energy:
# We have got a better energy value
self.energy_state.update_current(e, x_visit)
if e < self.energy_state.ebest:
val = self.energy_state.update_best(e, x_visit, 0)
if val is not None:
if val:
return val
self.energy_state_improved = True
self.not_improved_idx = 0
else:
# We have not improved but do we accept the new location?
self.accept_reject(j, e, x_visit)
if self.func_wrapper.nfev >= self.func_wrapper.maxfun:
                return ('Maximum number of function calls reached '
'during annealing')
# End of StrategyChain loop
def local_search(self):
# Decision making for performing a local search
# based on strategy chain results
        # If the energy has improved or there has been no improvement for too
        # long, perform a local search with the best strategy chain location
if self.energy_state_improved:
# Global energy has improved, let's see if LS improves further
e, x = self.minimizer_wrapper.local_search(self.energy_state.xbest,
self.energy_state.ebest)
if e < self.energy_state.ebest:
self.not_improved_idx = 0
val = self.energy_state.update_best(e, x, 1)
if val is not None:
if val:
return val
self.energy_state.update_current(e, x)
if self.func_wrapper.nfev >= self.func_wrapper.maxfun:
                return ('Maximum number of function calls reached '
'during local search')
# Check probability of a need to perform a LS even if no improvement
do_ls = False
if self.K < 90 * len(self.energy_state.current_location):
pls = np.exp(self.K * (
self.energy_state.ebest - self.energy_state.current_energy) /
self.temperature_step)
if pls >= self._rand_state.random_sample():
do_ls = True
# Global energy not improved, let's see what LS gives
# on the best strategy chain location
if self.not_improved_idx >= self.not_improved_max_idx:
do_ls = True
if do_ls:
e, x = self.minimizer_wrapper.local_search(self.xmin, self.emin)
self.xmin = np.copy(x)
self.emin = e
self.not_improved_idx = 0
self.not_improved_max_idx = self.energy_state.current_location.size
if e < self.energy_state.ebest:
val = self.energy_state.update_best(
self.emin, self.xmin, 2)
if val is not None:
if val:
return val
self.energy_state.update_current(e, x)
if self.func_wrapper.nfev >= self.func_wrapper.maxfun:
                return ('Maximum number of function calls reached '
'during dual annealing')
class ObjectiveFunWrapper(object):
def __init__(self, func, maxfun=1e7, *args):
self.func = func
self.args = args
# Number of objective function evaluations
self.nfev = 0
# Number of gradient function evaluation if used
self.ngev = 0
# Number of hessian of the objective function if used
self.nhev = 0
self.maxfun = maxfun
def fun(self, x):
self.nfev += 1
return self.func(x, *self.args)
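# A minimal sketch of the wrapper above (illustrative only): it forwards the
# call to the target function, passes through extra positional arguments and
# counts evaluations in `nfev`. The shift value is an arbitrary placeholder.
def _example_objective_fun_wrapper():
    wrapper = ObjectiveFunWrapper(
        lambda x, shift: float(np.sum((x - shift) ** 2)), 1e7, 0.5)
    value = wrapper.fun(np.array([1.0, 2.0]))  # (1-0.5)**2 + (2-0.5)**2 = 2.5
    return value, wrapper.nfev  # nfev is now 1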
class LocalSearchWrapper(object):
"""
Class used to wrap around the minimizer used for local search
Default local minimizer is SciPy minimizer L-BFGS-B
"""
LS_MAXITER_RATIO = 6
LS_MAXITER_MIN = 100
LS_MAXITER_MAX = 1000
def __init__(self, bounds, func_wrapper, **kwargs):
self.func_wrapper = func_wrapper
self.kwargs = kwargs
self.minimizer = minimize
bounds_list = list(zip(*bounds))
self.lower = np.array(bounds_list[0])
self.upper = np.array(bounds_list[1])
# If no minimizer specified, use SciPy minimize with 'L-BFGS-B' method
if not self.kwargs:
n = len(self.lower)
ls_max_iter = min(max(n * self.LS_MAXITER_RATIO,
self.LS_MAXITER_MIN),
self.LS_MAXITER_MAX)
self.kwargs['method'] = 'L-BFGS-B'
self.kwargs['options'] = {
'maxiter': ls_max_iter,
}
self.kwargs['bounds'] = list(zip(self.lower, self.upper))
def local_search(self, x, e):
# Run local search from the given x location where energy value is e
x_tmp = np.copy(x)
mres = self.minimizer(self.func_wrapper.fun, x, **self.kwargs)
if 'njev' in mres.keys():
self.func_wrapper.ngev += mres.njev
if 'nhev' in mres.keys():
self.func_wrapper.nhev += mres.nhev
        # Check if the result is a valid value
is_finite = np.all(np.isfinite(mres.x)) and np.isfinite(mres.fun)
in_bounds = np.all(mres.x >= self.lower) and np.all(
mres.x <= self.upper)
is_valid = is_finite and in_bounds
        # Use the new point only if it is valid and returns a better result
if is_valid and mres.fun < e:
return mres.fun, mres.x
else:
return e, x_tmp
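# A minimal usage sketch for LocalSearchWrapper (illustrative only): with no
# extra keyword arguments it falls back to bounded L-BFGS-B and returns the
# refined point only when it is finite, inside the bounds and better than the
# input. The bounds and the starting point are arbitrary placeholders.
def _example_local_search_wrapper():
    bounds = [(-2.0, 2.0), (-2.0, 2.0)]
    wrapper = ObjectiveFunWrapper(lambda x: float(np.sum(x ** 2)))
    local = LocalSearchWrapper(bounds, wrapper)
    x0 = np.array([1.5, -1.0])
    energy, x = local.local_search(x0, wrapper.fun(x0))
    return energy, x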
def dual_annealing(func, bounds, args=(), maxiter=1000,
local_search_options={}, initial_temp=5230.,
restart_temp_ratio=2.e-5, visit=2.62, accept=-5.0,
maxfun=1e7, seed=None, no_local_search=False,
callback=None, x0=None):
"""
Find the global minimum of a function using Dual Annealing.
Parameters
----------
func : callable
The objective function to be minimized. Must be in the form
``f(x, *args)``, where ``x`` is the argument in the form of a 1-D array
and ``args`` is a tuple of any additional fixed parameters needed to
completely specify the function.
bounds : sequence, shape (n, 2)
Bounds for variables. ``(min, max)`` pairs for each element in ``x``,
defining bounds for the objective function parameter.
args : tuple, optional
Any additional fixed parameters needed to completely specify the
objective function.
maxiter : int, optional
The maximum number of global search iterations. Default value is 1000.
local_search_options : dict, optional
Extra keyword arguments to be passed to the local minimizer
(`minimize`). Some important options could be:
``method`` for the minimizer method to use and ``args`` for
objective function additional arguments.
initial_temp : float, optional
        The initial temperature, use higher values to facilitate a wider
search of the energy landscape, allowing dual_annealing to escape
local minima that it is trapped in. Default value is 5230. Range is
(0.01, 5.e4].
restart_temp_ratio : float, optional
        During the annealing process, the temperature is decreasing; when it
reaches ``initial_temp * restart_temp_ratio``, the reannealing process
is triggered. Default value of the ratio is 2e-5. Range is (0, 1).
visit : float, optional
Parameter for visiting distribution. Default value is 2.62. Higher
        values give the visiting distribution a heavier tail, which makes
the algorithm jump to a more distant region. The value range is (0, 3].
accept : float, optional
Parameter for acceptance distribution. It is used to control the
probability of acceptance. The lower the acceptance parameter, the
smaller the probability of acceptance. Default value is -5.0 with
a range (-1e4, -5].
maxfun : int, optional
Soft limit for the number of objective function calls. If the
algorithm is in the middle of a local search, this number will be
        exceeded, and the algorithm will stop just after the local search is
done. Default value is 1e7.
seed : {int or `~numpy.random.mtrand.RandomState` instance}, optional
If `seed` is not specified the `~numpy.random.mtrand.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``RandomState`` instance, then that
instance is used.
Specify `seed` for repeatable minimizations. The random numbers
generated with this seed only affect the visiting distribution
function and new coordinates generation.
no_local_search : bool, optional
If `no_local_search` is set to True, a traditional Generalized
Simulated Annealing will be performed with no local search
strategy applied.
callback : callable, optional
A callback function with signature ``callback(x, f, context)``,
which will be called for all minima found.
``x`` and ``f`` are the coordinates and function value of the
latest minimum found, and ``context`` has value in [0, 1, 2], with the
following meaning:
- 0: minimum detected in the annealing process.
- 1: detection occurred in the local search process.
- 2: detection done in the dual annealing process.
If the callback implementation returns True, the algorithm will stop.
x0 : ndarray, shape(n,), optional
Coordinates of a single N-D starting point.
Returns
-------
res : OptimizeResult
The optimization result represented as a `OptimizeResult` object.
Important attributes are: ``x`` the solution array, ``fun`` the value
of the function at the solution, and ``message`` which describes the
cause of the termination.
See `OptimizeResult` for a description of other attributes.
Notes
-----
This function implements the Dual Annealing optimization. This stochastic
approach derived from [3]_ combines the generalization of CSA (Classical
Simulated Annealing) and FSA (Fast Simulated Annealing) [1]_ [2]_ coupled
to a strategy for applying a local search on accepted locations [4]_.
An alternative implementation of this same algorithm is described in [5]_
and benchmarks are presented in [6]_. This approach introduces an advanced
method to refine the solution found by the generalized annealing
process. This algorithm uses a distorted Cauchy-Lorentz visiting
distribution, with its shape controlled by the parameter :math:`q_{v}`
.. math::
g_{q_{v}}(\\Delta x(t)) \\propto \\frac{ \\
\\left[T_{q_{v}}(t) \\right]^{-\\frac{D}{3-q_{v}}}}{ \\
\\left[{1+(q_{v}-1)\\frac{(\\Delta x(t))^{2}} { \\
\\left[T_{q_{v}}(t)\\right]^{\\frac{2}{3-q_{v}}}}}\\right]^{ \\
\\frac{1}{q_{v}-1}+\\frac{D-1}{2}}}
Where :math:`t` is the artificial time. This visiting distribution is used
to generate a trial jump distance :math:`\\Delta x(t)` of variable
:math:`x(t)` under artificial temperature :math:`T_{q_{v}}(t)`.
From the starting point, after calling the visiting distribution
function, the acceptance probability is computed as follows:
.. math::
p_{q_{a}} = \\min{\\{1,\\left[1-(1-q_{a}) \\beta \\Delta E \\right]^{ \\
\\frac{1}{1-q_{a}}}\\}}
    Where :math:`q_{a}` is an acceptance parameter. For :math:`q_{a}<1`, zero
acceptance probability is assigned to the cases where
.. math::
[1-(1-q_{a}) \\beta \\Delta E] < 0
The artificial temperature :math:`T_{q_{v}}(t)` is decreased according to
.. math::
T_{q_{v}}(t) = T_{q_{v}}(1) \\frac{2^{q_{v}-1}-1}{\\left( \\
1 + t\\right)^{q_{v}-1}-1}
Where :math:`q_{v}` is the visiting parameter.
.. versionadded:: 1.2.0
References
----------
.. [1] Tsallis C. Possible generalization of Boltzmann-Gibbs
statistics. Journal of Statistical Physics, 52, 479-487 (1998).
.. [2] Tsallis C, Stariolo DA. Generalized Simulated Annealing.
Physica A, 233, 395-406 (1996).
.. [3] Xiang Y, Sun DY, Fan W, Gong XG. Generalized Simulated
Annealing Algorithm and Its Application to the Thomson Model.
Physics Letters A, 233, 216-220 (1997).
.. [4] Xiang Y, Gong XG. Efficiency of Generalized Simulated
Annealing. Physical Review E, 62, 4473 (2000).
.. [5] Xiang Y, Gubian S, Suomela B, Hoeng J. Generalized
Simulated Annealing for Efficient Global Optimization: the GenSA
Package for R. The R Journal, Volume 5/1 (2013).
.. [6] Mullen, K. Continuous Global Optimization in R. Journal of
Statistical Software, 60(6), 1 - 45, (2014). DOI:10.18637/jss.v060.i06
Examples
--------
The following example is a 10-D problem, with many local minima.
The function involved is called Rastrigin
(https://en.wikipedia.org/wiki/Rastrigin_function)
    >>> import numpy as np
    >>> from scipy.optimize import dual_annealing
>>> func = lambda x: np.sum(x*x - 10*np.cos(2*np.pi*x)) + 10*np.size(x)
>>> lw = [-5.12] * 10
>>> up = [5.12] * 10
>>> ret = dual_annealing(func, bounds=list(zip(lw, up)), seed=1234)
>>> print("global minimum: xmin = {0}, f(xmin) = {1:.6f}".format(
... ret.x, ret.fun))
global minimum: xmin = [-4.26437714e-09 -3.91699361e-09 -1.86149218e-09 -3.97165720e-09
-6.29151648e-09 -6.53145322e-09 -3.93616815e-09 -6.55623025e-09
-6.05775280e-09 -5.00668935e-09], f(xmin) = 0.000000
""" # noqa: E501
if x0 is not None and not len(x0) == len(bounds):
raise ValueError('Bounds size does not match x0')
lu = list(zip(*bounds))
lower = np.array(lu[0])
upper = np.array(lu[1])
# Check that restart temperature ratio is correct
if restart_temp_ratio <= 0. or restart_temp_ratio >= 1.:
raise ValueError('Restart temperature ratio has to be in range (0, 1)')
# Checking bounds are valid
if (np.any(np.isinf(lower)) or np.any(np.isinf(upper)) or np.any(
np.isnan(lower)) or np.any(np.isnan(upper))):
raise ValueError('Some bounds values are inf values or nan values')
# Checking that bounds are consistent
if not np.all(lower < upper):
raise ValueError('Bounds are not consistent min < max')
# Checking that bounds are the same length
if not len(lower) == len(upper):
raise ValueError('Bounds do not have the same dimensions')
# Wrapper for the objective function
func_wrapper = ObjectiveFunWrapper(func, maxfun, *args)
    # Wrapper for the minimizer
minimizer_wrapper = LocalSearchWrapper(
bounds, func_wrapper, **local_search_options)
# Initialization of RandomState for reproducible runs if seed provided
rand_state = check_random_state(seed)
# Initialization of the energy state
energy_state = EnergyState(lower, upper, callback)
energy_state.reset(func_wrapper, rand_state, x0)
# Minimum value of annealing temperature reached to perform
# re-annealing
temperature_restart = initial_temp * restart_temp_ratio
# VisitingDistribution instance
visit_dist = VisitingDistribution(lower, upper, visit, rand_state)
# Strategy chain instance
strategy_chain = StrategyChain(accept, visit_dist, func_wrapper,
minimizer_wrapper, rand_state, energy_state)
need_to_stop = False
iteration = 0
message = []
# OptimizeResult object to be returned
optimize_res = OptimizeResult()
optimize_res.success = True
optimize_res.status = 0
t1 = np.exp((visit - 1) * np.log(2.0)) - 1.0
# Run the search loop
while(not need_to_stop):
for i in range(maxiter):
# Compute temperature for this step
s = float(i) + 2.0
t2 = np.exp((visit - 1) * np.log(s)) - 1.0
temperature = initial_temp * t1 / t2
if iteration >= maxiter:
message.append("Maximum number of iteration reached")
need_to_stop = True
break
# Need a re-annealing process?
if temperature < temperature_restart:
energy_state.reset(func_wrapper, rand_state)
break
# starting strategy chain
val = strategy_chain.run(i, temperature)
if val is not None:
message.append(val)
need_to_stop = True
optimize_res.success = False
break
# Possible local search at the end of the strategy chain
if not no_local_search:
val = strategy_chain.local_search()
if val is not None:
message.append(val)
need_to_stop = True
optimize_res.success = False
break
iteration += 1
# Setting the OptimizeResult values
optimize_res.x = energy_state.xbest
optimize_res.fun = energy_state.ebest
optimize_res.nit = iteration
optimize_res.nfev = func_wrapper.nfev
optimize_res.njev = func_wrapper.ngev
optimize_res.nhev = func_wrapper.nhev
optimize_res.message = message
return optimize_res
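# A minimal sketch of the optional knobs documented above (illustrative only):
# a custom local minimizer method, an explicit starting point and a callback
# reporting where each new minimum was found. The bounds, seed and iteration
# budget are arbitrary placeholder values.
def _example_dual_annealing_options():
    def rastrigin(x):
        return np.sum(x * x - 10 * np.cos(2 * np.pi * x)) + 10 * np.size(x)
    def report(x, f, context):
        # context: 0 = annealing, 1 = local search, 2 = dual annealing
        print('new minimum f=%g (context=%d)' % (f, context))
    bounds = list(zip([-5.12] * 2, [5.12] * 2))
    return dual_annealing(rastrigin, bounds,
                          local_search_options={'method': 'L-BFGS-B'},
                          x0=np.array([1.0, -1.0]),
                          callback=report, seed=1234, maxiter=200)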
| bsd-3-clause | 1,399,627,725,223,141,400 | 42.166909 | 91 | 0.601405 | false |
xpansa/account-financial-tools | account_invoice_tax_required/__openerp__.py | 13 | 1253 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author Vincent Renaville. Copyright 2015 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': "Tax required in invoice",
'version': "8.0.1.0.0",
"author": "Camptocamp,Odoo Community Association (OCA)",
'website': "http://www.camptocamp.com",
'category': "Localisation / Accounting",
'license': "AGPL-3",
'depends': ["account"],
'data': [
],
'installable': True,
}
| agpl-3.0 | 8,236,022,747,232,913,000 | 39.419355 | 78 | 0.592179 | false |
sourcepole/kadas-albireo | python/plugins/processing/algs/qgis/FixedDistanceBuffer.py | 8 | 2865 | # -*- coding: utf-8 -*-
"""
***************************************************************************
FixedDistanceBuffer.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from qgis.core import QGis
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.parameters import ParameterVector
from processing.core.parameters import ParameterBoolean
from processing.core.parameters import ParameterNumber
from processing.core.outputs import OutputVector
import Buffer as buff
from processing.tools import dataobjects
class FixedDistanceBuffer(GeoAlgorithm):
INPUT = 'INPUT'
OUTPUT = 'OUTPUT'
FIELD = 'FIELD'
DISTANCE = 'DISTANCE'
SEGMENTS = 'SEGMENTS'
DISSOLVE = 'DISSOLVE'
def defineCharacteristics(self):
self.name = 'Fixed distance buffer'
self.group = 'Vector geometry tools'
self.addParameter(ParameterVector(self.INPUT,
self.tr('Input layer'), [ParameterVector.VECTOR_TYPE_ANY]))
self.addParameter(ParameterNumber(self.DISTANCE,
self.tr('Distance'), default=10.0))
self.addParameter(ParameterNumber(self.SEGMENTS,
self.tr('Segments'), 1, default=5))
self.addParameter(ParameterBoolean(self.DISSOLVE,
self.tr('Dissolve result'), False))
self.addOutput(OutputVector(self.OUTPUT, self.tr('Buffer')))
def processAlgorithm(self, progress):
layer = dataobjects.getObjectFromUri(self.getParameterValue(self.INPUT))
distance = self.getParameterValue(self.DISTANCE)
dissolve = self.getParameterValue(self.DISSOLVE)
segments = int(self.getParameterValue(self.SEGMENTS))
writer = self.getOutputFromName(
self.OUTPUT).getVectorWriter(layer.pendingFields().toList(),
QGis.WKBPolygon, layer.crs())
buff.buffering(progress, writer, distance, None, False, layer,
dissolve, segments)
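# A minimal usage sketch (assumptions: the QGIS 2.x `processing.runalg`
# console helper and the 'qgis:fixeddistancebuffer' algorithm id; the argument
# order follows the parameter declarations above - input layer, distance,
# segments, dissolve, output path or None for a temporary layer):
#
#   import processing
#   result = processing.runalg('qgis:fixeddistancebuffer',
#                              input_layer, 100.0, 5, False, None)
#   buffered = result['OUTPUT']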
| gpl-2.0 | -5,116,185,953,834,948,000 | 38.791667 | 80 | 0.572426 | false |
bq/bitbloq-offline | app/res/web2board/linux/platformio/builder/scripts/frameworks/energia.py | 5 | 2025 | # Copyright 2014-2015 Ivan Kravets <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Energia
Energia framework enables pretty much anyone to start easily creating
microcontroller-based projects and applications. Its easy-to-use libraries
and functions allow developers of all experience levels to start
blinking LEDs, buzzing buzzers and sensing sensors more quickly than ever
before.
http://energia.nu/reference/
"""
from os.path import join
from SCons.Script import DefaultEnvironment
env = DefaultEnvironment()
env.Replace(
PLATFORMFW_DIR=join("$PIOPACKAGES_DIR", "framework-energia${PLATFORM[2:]}")
)
ENERGIA_VERSION = int(
open(join(env.subst("$PLATFORMFW_DIR"),
"version.txt")).read().replace(".", "").strip())
# include board variant
env.VariantDirWrap(
join("$BUILD_DIR", "FrameworkEnergiaVariant"),
join("$PLATFORMFW_DIR", "variants", "${BOARD_OPTIONS['build']['variant']}")
)
env.Append(
CPPDEFINES=[
"ARDUINO=101",
"ENERGIA=%d" % ENERGIA_VERSION
],
CPPPATH=[
join("$BUILD_DIR", "FrameworkEnergia"),
join("$BUILD_DIR", "FrameworkEnergiaVariant")
]
)
if env.get("BOARD_OPTIONS", {}).get("build", {}).get("core") == "lm4f":
env.Append(
LINKFLAGS=["-Wl,--entry=ResetISR"]
)
#
# Target: Build Core Library
#
libs = []
libs.append(env.BuildLibrary(
join("$BUILD_DIR", "FrameworkEnergia"),
join("$PLATFORMFW_DIR", "cores", "${BOARD_OPTIONS['build']['core']}")
))
env.Append(LIBS=libs)
| gpl-3.0 | -8,511,220,934,102,577,000 | 26.364865 | 79 | 0.693827 | false |
tudorvio/nova | nova/tests/unit/cells/test_cells_rpcapi.py | 20 | 32450 | # Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Cells RPCAPI
"""
from oslo_config import cfg
import six
from nova.cells import rpcapi as cells_rpcapi
from nova import exception
from nova import objects
from nova import test
from nova.tests.unit import fake_instance
CONF = cfg.CONF
CONF.import_opt('topic', 'nova.cells.opts', group='cells')
class CellsAPITestCase(test.NoDBTestCase):
"""Test case for cells.api interfaces."""
def setUp(self):
super(CellsAPITestCase, self).setUp()
self.fake_topic = 'fake_topic'
self.fake_context = 'fake_context'
self.flags(topic=self.fake_topic, enable=True, group='cells')
self.cells_rpcapi = cells_rpcapi.CellsAPI()
def _stub_rpc_method(self, rpc_method, result):
call_info = {}
orig_prepare = self.cells_rpcapi.client.prepare
def fake_rpc_prepare(**kwargs):
if 'version' in kwargs:
call_info['version'] = kwargs.pop('version')
return self.cells_rpcapi.client
def fake_csv(version):
return orig_prepare(version).can_send_version()
def fake_rpc_method(ctxt, method, **kwargs):
call_info['context'] = ctxt
call_info['method'] = method
call_info['args'] = kwargs
return result
self.stubs.Set(self.cells_rpcapi.client, 'prepare', fake_rpc_prepare)
self.stubs.Set(self.cells_rpcapi.client, 'can_send_version', fake_csv)
self.stubs.Set(self.cells_rpcapi.client, rpc_method, fake_rpc_method)
return call_info
def _check_result(self, call_info, method, args, version=None):
self.assertEqual(self.cells_rpcapi.client.target.topic,
self.fake_topic)
self.assertEqual(self.fake_context, call_info['context'])
self.assertEqual(method, call_info['method'])
self.assertEqual(args, call_info['args'])
if version is not None:
self.assertIn('version', call_info)
self.assertIsInstance(call_info['version'], six.string_types,
msg="Message version %s is not a string" %
call_info['version'])
self.assertEqual(version, call_info['version'])
else:
self.assertNotIn('version', call_info)
def test_cast_compute_api_method(self):
fake_cell_name = 'fake_cell_name'
fake_method = 'fake_method'
fake_method_args = (1, 2)
fake_method_kwargs = {'kwarg1': 10, 'kwarg2': 20}
expected_method_info = {'method': fake_method,
'method_args': fake_method_args,
'method_kwargs': fake_method_kwargs}
expected_args = {'method_info': expected_method_info,
'cell_name': fake_cell_name,
'call': False}
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.cast_compute_api_method(self.fake_context,
fake_cell_name, fake_method,
*fake_method_args, **fake_method_kwargs)
self._check_result(call_info, 'run_compute_api_method',
expected_args)
def test_call_compute_api_method(self):
fake_cell_name = 'fake_cell_name'
fake_method = 'fake_method'
fake_method_args = (1, 2)
fake_method_kwargs = {'kwarg1': 10, 'kwarg2': 20}
fake_response = 'fake_response'
expected_method_info = {'method': fake_method,
'method_args': fake_method_args,
'method_kwargs': fake_method_kwargs}
expected_args = {'method_info': expected_method_info,
'cell_name': fake_cell_name,
'call': True}
call_info = self._stub_rpc_method('call', fake_response)
result = self.cells_rpcapi.call_compute_api_method(self.fake_context,
fake_cell_name, fake_method,
*fake_method_args, **fake_method_kwargs)
self._check_result(call_info, 'run_compute_api_method',
expected_args)
self.assertEqual(fake_response, result)
def test_build_instances(self):
call_info = self._stub_rpc_method('cast', None)
instances = [objects.Instance(id=1),
objects.Instance(id=2)]
self.cells_rpcapi.build_instances(
self.fake_context, instances=instances,
image={'fake': 'image'}, arg1=1, arg2=2, arg3=3)
expected_args = {'build_inst_kwargs': {'instances': instances,
'image': {'fake': 'image'},
'arg1': 1,
'arg2': 2,
'arg3': 3}}
self._check_result(call_info, 'build_instances',
expected_args, version='1.34')
def test_get_capacities(self):
capacity_info = {"capacity": "info"}
call_info = self._stub_rpc_method('call',
result=capacity_info)
result = self.cells_rpcapi.get_capacities(self.fake_context,
cell_name="name")
self._check_result(call_info, 'get_capacities',
{'cell_name': 'name'}, version='1.9')
self.assertEqual(capacity_info, result)
def test_instance_update_at_top(self):
fake_info_cache = objects.InstanceInfoCache(instance_uuid='fake-uuid')
fake_sys_metadata = {'key1': 'value1',
'key2': 'value2'}
fake_attrs = {'id': 2,
'cell_name': 'fake',
'metadata': {'fake': 'fake'},
'info_cache': fake_info_cache,
'system_metadata': fake_sys_metadata}
fake_instance = objects.Instance(**fake_attrs)
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.instance_update_at_top(
self.fake_context, fake_instance)
expected_args = {'instance': fake_instance}
self._check_result(call_info, 'instance_update_at_top',
expected_args, version='1.35')
def test_instance_destroy_at_top(self):
fake_instance = objects.Instance(uuid='fake-uuid')
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.instance_destroy_at_top(
self.fake_context, fake_instance)
expected_args = {'instance': fake_instance}
self._check_result(call_info, 'instance_destroy_at_top',
expected_args, version='1.35')
def test_instance_delete_everywhere(self):
instance = fake_instance.fake_instance_obj(self.fake_context)
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.instance_delete_everywhere(
self.fake_context, instance,
'fake-type')
expected_args = {'instance': instance,
'delete_type': 'fake-type'}
self._check_result(call_info, 'instance_delete_everywhere',
expected_args, version='1.27')
def test_instance_fault_create_at_top(self):
fake_instance_fault = {'id': 2,
'other': 'meow'}
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.instance_fault_create_at_top(
self.fake_context, fake_instance_fault)
expected_args = {'instance_fault': fake_instance_fault}
self._check_result(call_info, 'instance_fault_create_at_top',
expected_args)
def test_bw_usage_update_at_top(self):
update_args = ('fake_uuid', 'fake_mac', 'fake_start_period',
'fake_bw_in', 'fake_bw_out', 'fake_ctr_in',
'fake_ctr_out')
update_kwargs = {'last_refreshed': 'fake_refreshed'}
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.bw_usage_update_at_top(
self.fake_context, *update_args, **update_kwargs)
bw_update_info = {'uuid': 'fake_uuid',
'mac': 'fake_mac',
'start_period': 'fake_start_period',
'bw_in': 'fake_bw_in',
'bw_out': 'fake_bw_out',
'last_ctr_in': 'fake_ctr_in',
'last_ctr_out': 'fake_ctr_out',
'last_refreshed': 'fake_refreshed'}
expected_args = {'bw_update_info': bw_update_info}
self._check_result(call_info, 'bw_usage_update_at_top',
expected_args)
def test_get_cell_info_for_neighbors(self):
call_info = self._stub_rpc_method('call', 'fake_response')
result = self.cells_rpcapi.get_cell_info_for_neighbors(
self.fake_context)
self._check_result(call_info, 'get_cell_info_for_neighbors', {},
version='1.1')
self.assertEqual(result, 'fake_response')
def test_sync_instances(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.sync_instances(self.fake_context,
project_id='fake_project', updated_since='fake_time',
deleted=True)
expected_args = {'project_id': 'fake_project',
'updated_since': 'fake_time',
'deleted': True}
self._check_result(call_info, 'sync_instances', expected_args,
version='1.1')
def test_service_get_all(self):
call_info = self._stub_rpc_method('call', 'fake_response')
fake_filters = {'key1': 'val1', 'key2': 'val2'}
result = self.cells_rpcapi.service_get_all(self.fake_context,
filters=fake_filters)
expected_args = {'filters': fake_filters}
self._check_result(call_info, 'service_get_all', expected_args,
version='1.2')
self.assertEqual(result, 'fake_response')
def test_service_get_by_compute_host(self):
call_info = self._stub_rpc_method('call', 'fake_response')
result = self.cells_rpcapi.service_get_by_compute_host(
self.fake_context, host_name='fake-host-name')
expected_args = {'host_name': 'fake-host-name'}
self._check_result(call_info, 'service_get_by_compute_host',
expected_args,
version='1.2')
self.assertEqual(result, 'fake_response')
def test_get_host_uptime(self):
call_info = self._stub_rpc_method('call', 'fake_response')
result = self.cells_rpcapi.get_host_uptime(
self.fake_context, host_name='fake-host-name')
expected_args = {'host_name': 'fake-host-name'}
self._check_result(call_info, 'get_host_uptime',
expected_args,
version='1.17')
self.assertEqual(result, 'fake_response')
def test_service_update(self):
call_info = self._stub_rpc_method('call', 'fake_response')
result = self.cells_rpcapi.service_update(
self.fake_context, host_name='fake-host-name',
binary='nova-api', params_to_update={'disabled': True})
expected_args = {
'host_name': 'fake-host-name',
'binary': 'nova-api',
'params_to_update': {'disabled': True}}
self._check_result(call_info, 'service_update',
expected_args,
version='1.7')
self.assertEqual(result, 'fake_response')
def test_service_delete(self):
call_info = self._stub_rpc_method('call', None)
cell_service_id = 'cell@id'
result = self.cells_rpcapi.service_delete(
self.fake_context, cell_service_id=cell_service_id)
expected_args = {'cell_service_id': cell_service_id}
self._check_result(call_info, 'service_delete',
expected_args, version='1.26')
self.assertIsNone(result)
def test_proxy_rpc_to_manager(self):
call_info = self._stub_rpc_method('call', 'fake_response')
result = self.cells_rpcapi.proxy_rpc_to_manager(
self.fake_context, rpc_message='fake-msg',
topic='fake-topic', call=True, timeout=-1)
expected_args = {'rpc_message': 'fake-msg',
'topic': 'fake-topic',
'call': True,
'timeout': -1}
self._check_result(call_info, 'proxy_rpc_to_manager',
expected_args,
version='1.2')
self.assertEqual(result, 'fake_response')
def test_task_log_get_all(self):
call_info = self._stub_rpc_method('call', 'fake_response')
result = self.cells_rpcapi.task_log_get_all(self.fake_context,
task_name='fake_name',
period_beginning='fake_begin',
period_ending='fake_end',
host='fake_host',
state='fake_state')
expected_args = {'task_name': 'fake_name',
'period_beginning': 'fake_begin',
'period_ending': 'fake_end',
'host': 'fake_host',
'state': 'fake_state'}
self._check_result(call_info, 'task_log_get_all', expected_args,
version='1.3')
self.assertEqual(result, 'fake_response')
def test_compute_node_get_all(self):
call_info = self._stub_rpc_method('call', 'fake_response')
result = self.cells_rpcapi.compute_node_get_all(self.fake_context,
hypervisor_match='fake-match')
expected_args = {'hypervisor_match': 'fake-match'}
self._check_result(call_info, 'compute_node_get_all', expected_args,
version='1.4')
self.assertEqual(result, 'fake_response')
def test_compute_node_stats(self):
call_info = self._stub_rpc_method('call', 'fake_response')
result = self.cells_rpcapi.compute_node_stats(self.fake_context)
expected_args = {}
self._check_result(call_info, 'compute_node_stats',
expected_args, version='1.4')
self.assertEqual(result, 'fake_response')
def test_compute_node_get(self):
call_info = self._stub_rpc_method('call', 'fake_response')
result = self.cells_rpcapi.compute_node_get(self.fake_context,
'fake_compute_id')
expected_args = {'compute_id': 'fake_compute_id'}
self._check_result(call_info, 'compute_node_get',
expected_args, version='1.4')
self.assertEqual(result, 'fake_response')
def test_actions_get(self):
fake_instance = {'uuid': 'fake-uuid', 'cell_name': 'region!child'}
call_info = self._stub_rpc_method('call', 'fake_response')
result = self.cells_rpcapi.actions_get(self.fake_context,
fake_instance)
expected_args = {'cell_name': 'region!child',
'instance_uuid': fake_instance['uuid']}
self._check_result(call_info, 'actions_get', expected_args,
version='1.5')
self.assertEqual(result, 'fake_response')
def test_actions_get_no_cell(self):
fake_instance = {'uuid': 'fake-uuid', 'cell_name': None}
self.assertRaises(exception.InstanceUnknownCell,
self.cells_rpcapi.actions_get, self.fake_context,
fake_instance)
def test_action_get_by_request_id(self):
fake_instance = {'uuid': 'fake-uuid', 'cell_name': 'region!child'}
call_info = self._stub_rpc_method('call', 'fake_response')
result = self.cells_rpcapi.action_get_by_request_id(self.fake_context,
fake_instance,
'req-fake')
expected_args = {'cell_name': 'region!child',
'instance_uuid': fake_instance['uuid'],
'request_id': 'req-fake'}
self._check_result(call_info, 'action_get_by_request_id',
expected_args, version='1.5')
self.assertEqual(result, 'fake_response')
def test_action_get_by_request_id_no_cell(self):
fake_instance = {'uuid': 'fake-uuid', 'cell_name': None}
self.assertRaises(exception.InstanceUnknownCell,
self.cells_rpcapi.action_get_by_request_id,
self.fake_context, fake_instance, 'req-fake')
def test_action_events_get(self):
fake_instance = {'uuid': 'fake-uuid', 'cell_name': 'region!child'}
call_info = self._stub_rpc_method('call', 'fake_response')
result = self.cells_rpcapi.action_events_get(self.fake_context,
fake_instance,
'fake-action')
expected_args = {'cell_name': 'region!child',
'action_id': 'fake-action'}
self._check_result(call_info, 'action_events_get', expected_args,
version='1.5')
self.assertEqual(result, 'fake_response')
def test_action_events_get_no_cell(self):
fake_instance = {'uuid': 'fake-uuid', 'cell_name': None}
self.assertRaises(exception.InstanceUnknownCell,
self.cells_rpcapi.action_events_get,
self.fake_context, fake_instance, 'fake-action')
def test_consoleauth_delete_tokens(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.consoleauth_delete_tokens(self.fake_context,
'fake-uuid')
expected_args = {'instance_uuid': 'fake-uuid'}
self._check_result(call_info, 'consoleauth_delete_tokens',
expected_args, version='1.6')
def test_validate_console_port(self):
call_info = self._stub_rpc_method('call', 'fake_response')
result = self.cells_rpcapi.validate_console_port(self.fake_context,
'fake-uuid', 'fake-port', 'fake-type')
expected_args = {'instance_uuid': 'fake-uuid',
'console_port': 'fake-port',
'console_type': 'fake-type'}
self._check_result(call_info, 'validate_console_port',
expected_args, version='1.6')
self.assertEqual(result, 'fake_response')
def test_bdm_update_or_create_at_top(self):
fake_bdm = {'id': 2, 'other': 'meow'}
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.bdm_update_or_create_at_top(
self.fake_context, fake_bdm, create='fake-create')
expected_args = {'bdm': fake_bdm, 'create': 'fake-create'}
self._check_result(call_info, 'bdm_update_or_create_at_top',
expected_args, version='1.28')
def test_bdm_destroy_at_top(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.bdm_destroy_at_top(self.fake_context,
'fake-uuid',
device_name='fake-device',
volume_id='fake-vol')
expected_args = {'instance_uuid': 'fake-uuid',
'device_name': 'fake-device',
'volume_id': 'fake-vol'}
self._check_result(call_info, 'bdm_destroy_at_top',
expected_args, version='1.10')
def test_get_migrations(self):
call_info = self._stub_rpc_method('call', None)
filters = {'cell_name': 'ChildCell', 'status': 'confirmed'}
self.cells_rpcapi.get_migrations(self.fake_context, filters)
expected_args = {'filters': filters}
self._check_result(call_info, 'get_migrations', expected_args,
version="1.11")
def test_instance_update_from_api(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.instance_update_from_api(
self.fake_context, 'fake-instance',
expected_vm_state='exp_vm',
expected_task_state='exp_task',
admin_state_reset='admin_reset')
expected_args = {'instance': 'fake-instance',
'expected_vm_state': 'exp_vm',
'expected_task_state': 'exp_task',
'admin_state_reset': 'admin_reset'}
self._check_result(call_info, 'instance_update_from_api',
expected_args, version='1.16')
def test_start_instance(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.start_instance(
self.fake_context, 'fake-instance')
expected_args = {'instance': 'fake-instance'}
self._check_result(call_info, 'start_instance',
expected_args, version='1.12')
def test_stop_instance_cast(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.stop_instance(
self.fake_context, 'fake-instance', do_cast=True,
clean_shutdown=True)
expected_args = {'instance': 'fake-instance',
'do_cast': True,
'clean_shutdown': True}
self._check_result(call_info, 'stop_instance',
expected_args, version='1.31')
def test_stop_instance_call(self):
call_info = self._stub_rpc_method('call', 'fake_response')
result = self.cells_rpcapi.stop_instance(
self.fake_context, 'fake-instance', do_cast=False,
clean_shutdown=True)
expected_args = {'instance': 'fake-instance',
'do_cast': False,
'clean_shutdown': True}
self._check_result(call_info, 'stop_instance',
expected_args, version='1.31')
self.assertEqual(result, 'fake_response')
def test_cell_create(self):
call_info = self._stub_rpc_method('call', 'fake_response')
result = self.cells_rpcapi.cell_create(self.fake_context, 'values')
expected_args = {'values': 'values'}
self._check_result(call_info, 'cell_create',
expected_args, version='1.13')
self.assertEqual(result, 'fake_response')
def test_cell_update(self):
call_info = self._stub_rpc_method('call', 'fake_response')
result = self.cells_rpcapi.cell_update(self.fake_context,
'cell_name', 'values')
expected_args = {'cell_name': 'cell_name',
'values': 'values'}
self._check_result(call_info, 'cell_update',
expected_args, version='1.13')
self.assertEqual(result, 'fake_response')
def test_cell_delete(self):
call_info = self._stub_rpc_method('call', 'fake_response')
result = self.cells_rpcapi.cell_delete(self.fake_context,
'cell_name')
expected_args = {'cell_name': 'cell_name'}
self._check_result(call_info, 'cell_delete',
expected_args, version='1.13')
self.assertEqual(result, 'fake_response')
def test_cell_get(self):
call_info = self._stub_rpc_method('call', 'fake_response')
result = self.cells_rpcapi.cell_get(self.fake_context,
'cell_name')
expected_args = {'cell_name': 'cell_name'}
self._check_result(call_info, 'cell_get',
expected_args, version='1.13')
self.assertEqual(result, 'fake_response')
def test_reboot_instance(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.reboot_instance(
self.fake_context, 'fake-instance',
block_device_info='ignored', reboot_type='HARD')
expected_args = {'instance': 'fake-instance',
'reboot_type': 'HARD'}
self._check_result(call_info, 'reboot_instance',
expected_args, version='1.14')
def test_pause_instance(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.pause_instance(
self.fake_context, 'fake-instance')
expected_args = {'instance': 'fake-instance'}
self._check_result(call_info, 'pause_instance',
expected_args, version='1.19')
def test_unpause_instance(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.unpause_instance(
self.fake_context, 'fake-instance')
expected_args = {'instance': 'fake-instance'}
self._check_result(call_info, 'unpause_instance',
expected_args, version='1.19')
def test_suspend_instance(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.suspend_instance(
self.fake_context, 'fake-instance')
expected_args = {'instance': 'fake-instance'}
self._check_result(call_info, 'suspend_instance',
expected_args, version='1.15')
def test_resume_instance(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.resume_instance(
self.fake_context, 'fake-instance')
expected_args = {'instance': 'fake-instance'}
self._check_result(call_info, 'resume_instance',
expected_args, version='1.15')
def test_terminate_instance(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.terminate_instance(self.fake_context,
'fake-instance', [])
expected_args = {'instance': 'fake-instance'}
self._check_result(call_info, 'terminate_instance',
expected_args, version='1.18')
def test_soft_delete_instance(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.soft_delete_instance(self.fake_context,
'fake-instance')
expected_args = {'instance': 'fake-instance'}
self._check_result(call_info, 'soft_delete_instance',
expected_args, version='1.18')
def test_resize_instance(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.resize_instance(self.fake_context,
'fake-instance',
dict(cow='moo'),
'fake-hint',
'fake-flavor',
'fake-reservations',
clean_shutdown=True)
expected_args = {'instance': 'fake-instance',
'flavor': 'fake-flavor',
'extra_instance_updates': dict(cow='moo'),
'clean_shutdown': True}
self._check_result(call_info, 'resize_instance',
expected_args, version='1.33')
def test_live_migrate_instance(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.live_migrate_instance(self.fake_context,
'fake-instance',
'fake-host',
'fake-block',
'fake-commit')
expected_args = {'instance': 'fake-instance',
'block_migration': 'fake-block',
'disk_over_commit': 'fake-commit',
'host_name': 'fake-host'}
self._check_result(call_info, 'live_migrate_instance',
expected_args, version='1.20')
def test_revert_resize(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.revert_resize(self.fake_context,
'fake-instance',
'fake-migration',
'fake-dest',
'resvs')
expected_args = {'instance': 'fake-instance'}
self._check_result(call_info, 'revert_resize',
expected_args, version='1.21')
def test_confirm_resize(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.confirm_resize(self.fake_context,
'fake-instance',
'fake-migration',
'fake-source',
'resvs')
expected_args = {'instance': 'fake-instance'}
self._check_result(call_info, 'confirm_resize',
expected_args, version='1.21')
def test_reset_network(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.reset_network(self.fake_context,
'fake-instance')
expected_args = {'instance': 'fake-instance'}
self._check_result(call_info, 'reset_network',
expected_args, version='1.22')
def test_inject_network_info(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.inject_network_info(self.fake_context,
'fake-instance')
expected_args = {'instance': 'fake-instance'}
self._check_result(call_info, 'inject_network_info',
expected_args, version='1.23')
def test_snapshot_instance(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.snapshot_instance(self.fake_context,
'fake-instance',
'image-id')
expected_args = {'instance': 'fake-instance',
'image_id': 'image-id'}
self._check_result(call_info, 'snapshot_instance',
expected_args, version='1.24')
def test_backup_instance(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.backup_instance(self.fake_context,
'fake-instance',
'image-id',
'backup-type',
'rotation')
expected_args = {'instance': 'fake-instance',
'image_id': 'image-id',
'backup_type': 'backup-type',
'rotation': 'rotation'}
self._check_result(call_info, 'backup_instance',
expected_args, version='1.24')
def test_set_admin_password(self):
call_info = self._stub_rpc_method('cast', None)
self.cells_rpcapi.set_admin_password(self.fake_context,
'fake-instance', 'fake-password')
expected_args = {'instance': 'fake-instance',
'new_pass': 'fake-password'}
self._check_result(call_info, 'set_admin_password',
expected_args, version='1.29')
| apache-2.0 | -384,949,060,984,185,300 | 41.697368 | 78 | 0.531772 | false |
tjhei/burnman_old2 | burnman/geotherm.py | 1 | 4203 | # BurnMan - a lower mantle toolkit
# Copyright (C) 2012, 2013, Heister, T., Unterborn, C., Rose, I. and Cottaar, S.
# Released under GPL v2 or later.
import numpy as np
import matplotlib.pyplot as pyplot
import scipy.integrate as integrate
import burnman
from tools import *
def watson_baxter(pressure):
"""
polynomial fit from Watson, Baxter, EPSL, 2007
pressure: in Pa
returns: temperature in K
"""
temperature = np.empty_like(pressure)
for i in range(len(pressure)):
if (pressure[i] <= 15.e9):
temperature[i] = 1900.-1420.*pow(0.8,pressure[i]/1.e9)
else:
temperature[i] = 1680.+11.1*pressure[i]/1.e9
return temperature
def brown_shankland(pressure):
"""
geotherm from Brown and Shankland 1981
pressure: in Pa
returns: temperature in K
"""
temperature = np.empty_like(pressure)
for i in range(len(pressure)):
depth = burnman.seismic.prem_model.depth(pressure[i])
temperature[i] = lookup_and_interpolate(table_brown_depth, table_brown_temperature, depth)
return temperature
# geotherm from Anderson 1982
def anderson(pressure):
"""
geotherm from Anderson 1982
pressure: in Pa
returns: temperature in K
"""
temperature = np.empty_like(pressure)
for i in range(len(pressure)):
depth = burnman.seismic.prem_model.depth(pressure[i])
temperature[i] = lookup_and_interpolate(table_anderson_depth, table_anderson_temperature, depth)
return temperature
def adiabatic(pressures, T0, rock):
"""
This integrates dT/dP = gr * T / K_s in order to get a mantle adiabat
at the pressures given. Takes pressures in Pa, as well as an anchor
temperature corresponding to the first pressure in the list. The third
argument is an instance or burnman.composite, which is the material
for which we compute the adiabat. For more info see the documentation
on dTdP
Returns: a list of temperatures [K] for each of the pressures [Pa]
"""
temperatures = integrate.odeint(lambda t,p : dTdP(t,p,rock), T0, pressures)
return temperatures.ravel()
def dTdP(temperature, pressure, rock):
"""
ODE to integrate temperature with depth for a composite material
Assumes that the minerals exist at a common pressure (Reuss bound, should be good for
slow deformations at high temperature), as well as an adiabatic process. This
corresponds to conservation of enthalpy.
First consider compression of the composite to a new pressure P+dP. They all heat up
different amounts dT[i], according to their thermoelastic parameters. Then allow them
to equilibrate to a constant temperature dT, conserving heat within the composite.
This works out to the formula: dT/dP = T*sum(frac[i]*Cp[i]*gr[i]/K[i])/sum(frac[i]*Cp[i])
Returns: a single number, dT/dP [K/Pa] for the composite
"""
top = 0
bottom = 0
rock.set_state(pressure, temperature)
(fractions,minerals) = rock.unroll()
for (fr,mineral) in zip(fractions,minerals):
gr = mineral.grueneisen_parameter()
K_s = mineral.adiabatic_bulk_modulus()
C_p = mineral.heat_capacity_p()
top += fr*gr*C_p/K_s
bottom += fr*C_p
return temperature*top/bottom
table_brown = read_table("input_geotherm/brown_81.txt")
table_brown_depth = np.array(table_brown)[:,0]
table_brown_temperature = np.array(table_brown)[:,1]
table_anderson = read_table("input_geotherm/anderson_82.txt")
table_anderson_depth = np.array(table_anderson)[:,0]
table_anderson_temperature = np.array(table_anderson)[:,1]
# test geotherm
if __name__ == "__main__":
p = np.arange(1.0e9,128.0e9,3e9)
pyrolite = burnman.composite( [ (burnman.minerals.SLB_2011.mg_fe_perovskite(0.2), 0.8), (burnman.minerals.SLB_2011.ferropericlase(0.4), 0.2) ] )
pyrolite.set_method('slb3')
pyrolite.set_state(40.e9, 2000)
t1 = watson_baxter(p)
t2 = brown_shankland(p)
t3 = adiabatic(p, 1600, pyrolite)
p1,=pyplot.plot(p,t1,'x--r')
p2,=pyplot.plot(p,t2,'*-g')
p3,=pyplot.plot(p,t3,'*-b')
pyplot.legend([p1,p2,p3],[ "watson", "brown", "adiabatic"], loc=4)
pyplot.show()
| gpl-2.0 | -6,482,715,697,707,091,000 | 33.45082 | 148 | 0.675708 | false |
rootio/rootio_web | alembic/versions/5e35694cdba_chng_default_dt.py | 1 | 1425 | """chng_default_dt
Revision ID: 5e35694cdba
Revises: 43b646fd9f17
Create Date: 2017-08-23 14:19:50.467309
"""
# revision identifiers, used by Alembic.
revision = '5e35694cdba'
down_revision = '43b646fd9f17'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column(u'content_podcast', sa.Column('date_created', sa.DateTime(timezone=True), server_default='now()', nullable=True))
op.alter_column(u'content_podcast', 'updated_at',
existing_type=postgresql.TIMESTAMP(timezone=True),
nullable=True,
existing_server_default="2017-01-11 21:14:53.469648+00'::timestamp with time zone")
op.add_column(u'content_podcastdownload', sa.Column('date_downloaded', sa.DateTime(timezone=True), server_default='now()', nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column(u'content_podcastdownload', 'date_downloaded')
op.alter_column(u'content_podcast', 'updated_at',
existing_type=postgresql.TIMESTAMP(timezone=True),
nullable=False,
existing_server_default="2017-01-11 21:14:53.469648+00'::timestamp with time zone")
op.drop_column(u'content_podcast', 'date_created')
### end Alembic commands ###
| agpl-3.0 | 8,971,753,639,186,360,000 | 38.583333 | 142 | 0.686316 | false |
valtandor/easybuild-framework | easybuild/toolchains/compiler/gcc.py | 4 | 4198 | ##
# Copyright 2012-2015 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
Support for GCC (GNU Compiler Collection) as toolchain compiler.
@author: Stijn De Weirdt (Ghent University)
@author: Kenneth Hoste (Ghent University)
"""
import easybuild.tools.systemtools as systemtools
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.modules import get_software_root
from easybuild.tools.toolchain.compiler import Compiler
TC_CONSTANT_GCC = "GCC"
class Gcc(Compiler):
"""GCC compiler class"""
COMPILER_MODULE_NAME = ['GCC']
COMPILER_FAMILY = TC_CONSTANT_GCC
COMPILER_UNIQUE_OPTS = {
'loop': (False, "Automatic loop parallellisation"),
'f2c': (False, "Generate code compatible with f2c and f77"),
'lto':(False, "Enable Link Time Optimization"),
}
COMPILER_UNIQUE_OPTION_MAP = {
'i8': 'fdefault-integer-8',
'r8': 'fdefault-real-8',
'unroll': 'funroll-loops',
'f2c': 'ff2c',
'loop': ['ftree-switch-conversion', 'floop-interchange', 'floop-strip-mine', 'floop-block'],
'lto': 'flto',
'openmp': 'fopenmp',
'strict': ['mieee-fp', 'mno-recip'],
'precise':['mno-recip'],
'defaultprec':[],
'loose': ['mrecip', 'mno-ieee-fp'],
'veryloose': ['mrecip=all', 'mno-ieee-fp'],
}
# used when 'optarch' toolchain option is enabled (and --optarch is not specified)
COMPILER_OPTIMAL_ARCHITECTURE_OPTION = {
systemtools.AMD : 'march=native',
systemtools.INTEL : 'march=native',
systemtools.POWER: 'mcpu=native', # no support for march=native on POWER
}
# used with --optarch=GENERIC
COMPILER_GENERIC_OPTION = {
systemtools.AMD : 'march=x86-64 -mtune=generic',
systemtools.INTEL : 'march=x86-64 -mtune=generic',
systemtools.POWER: 'mcpu=generic-arch', # no support for -march on POWER
}
COMPILER_CC = 'gcc'
COMPILER_CXX = 'g++'
COMPILER_C_UNIQUE_FLAGS = []
COMPILER_F77 = 'gfortran'
COMPILER_F90 = 'gfortran'
COMPILER_FC = 'gfortran'
COMPILER_F_UNIQUE_FLAGS = ['f2c']
LIB_MULTITHREAD = ['pthread']
LIB_MATH = ['m']
def _set_compiler_vars(self):
super(Gcc, self)._set_compiler_vars()
if self.options.get('32bit', None):
raise EasyBuildError("_set_compiler_vars: 32bit set, but no support yet for 32bit GCC in EasyBuild")
# to get rid of lots of problems with libgfortranbegin
# or remove the system gcc-gfortran
# also used in eg LIBBLAS variable
self.variables.nappend('FLIBS', "gfortran", position=5)
# append lib dir paths to LDFLAGS (only if the paths are actually there)
# Note: hardcode 'GCC' here; we can not reuse COMPILER_MODULE_NAME because
# it can be redefined by combining GCC with other compilers (e.g., Clang).
gcc_root = get_software_root('GCCcore')
if gcc_root is None:
gcc_root = get_software_root('GCC')
if gcc_root is None:
raise EasyBuildError("Failed to determine software root for GCC")
self.variables.append_subdirs("LDFLAGS", gcc_root, subdirs=["lib64", "lib"])
| gpl-2.0 | 54,587,703,296,905,520 | 36.482143 | 112 | 0.661267 | false |
beraldoleal/entendaobrasil | api/camara/base.py | 1 | 1661 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014 Beraldo Leal <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License version
# 2 as published by the Free Software Foundation.
#
# vim: tabstop=4:softtabstop=4:shiftwidth=4:expandtab
from suds.client import Client
from entendaobrasil import settings
from datetime import datetime
import urllib2
import logging
logging.basicConfig(level=settings.CAMARA_API_LOG_LEVEL)
logger = logging.getLogger('entendaobrasil.api.camara')
class CamaraAPI(object):
def __init__(self):
# Baixa wsdl e instancia cliente
try:
logger.info(u"Baixando wsdl a partir de %s ..." % self.wsdl_url)
self.client = Client(self.wsdl_url)
self.service = self.client.service
except urllib2.URLError:
logger.error(u"Erro ao baixar wsdl. Abortando....")
def executar_metodo(self, metodo, *args, **kwargs):
logger.info(u"Acessando metodo %s ..." % metodo)
return getattr(self.service, metodo)(*args, **kwargs)
def as_list(self, element):
if type(element) == list:
return element
else:
return [element]
def as_text(self, valor):
return " ".join(valor.split())
def as_text_without_spaces(self, valor):
return "".join(valor.split())
def as_date(self, date, date_format="%d/%m/%Y"):
try:
return datetime.strptime(date, date_format)
except ValueError:
logger.debug(u"Data em formato inválido.")
return None
| gpl-2.0 | 8,964,178,856,227,553,000 | 29.181818 | 76 | 0.642771 | false |
BuildingLink/sentry | tests/sentry/digests/test_notifications.py | 6 | 3809 | from __future__ import absolute_import
from collections import (
OrderedDict,
defaultdict,
)
from exam import fixture
from six.moves import reduce
from sentry.digests import Record
from sentry.digests.notifications import (
Notification,
event_to_record,
rewrite_record,
group_records,
sort_group_contents,
sort_rule_groups,
)
from sentry.models import Rule
from sentry.testutils import TestCase
class RewriteRecordTestCase(TestCase):
@fixture
def rule(self):
return self.event.project.rule_set.all()[0]
@fixture
def record(self):
return event_to_record(self.event, (self.rule,))
def test_success(self):
assert rewrite_record(
self.record,
project=self.event.project,
groups={
self.event.group.id: self.event.group,
},
rules={
self.rule.id: self.rule,
},
) == Record(
self.record.key,
Notification(
self.event,
[self.rule],
),
self.record.timestamp,
)
def test_without_group(self):
# If the record can't be associated with a group, it should be returned as None.
assert rewrite_record(
self.record,
project=self.event.project,
groups={},
rules={
self.rule.id: self.rule,
},
) is None
def test_filters_invalid_rules(self):
# If the record can't be associated with a group, it should be returned as None.
assert rewrite_record(
self.record,
project=self.event.project,
groups={
self.event.group.id: self.event.group,
},
rules={},
) == Record(
self.record.key,
Notification(self.event, []),
self.record.timestamp,
)
class GroupRecordsTestCase(TestCase):
@fixture
def rule(self):
return self.project.rule_set.all()[0]
def test_success(self):
events = [self.create_event(group=self.group) for _ in range(3)]
records = [Record(event.id, Notification(event, [self.rule]), event.datetime) for event in events]
assert reduce(group_records, records, defaultdict(lambda: defaultdict(list))) == {
self.rule: {
self.group: records,
},
}
class SortRecordsTestCase(TestCase):
def test_success(self):
Rule.objects.create(
project=self.project,
label='Send a notification for regressions',
data={
'match': 'all',
'conditions': [
{'id': 'sentry.rules.conditions.regression_event.RegressionEventCondition'},
],
'actions': [
{'id': 'sentry.rules.actions.notify_event.NotifyEventAction'},
],
}
)
rules = list(self.project.rule_set.all())
groups = [self.create_group() for _ in range(3)]
groups[0].event_count = 10
groups[0].user_count = 4
groups[1].event_count = 5
groups[1].user_count = 2
groups[2].event_count = 5
groups[2].user_count = 1
grouped = {
rules[0]: {
groups[0]: [],
},
rules[1]: {
groups[1]: [],
groups[2]: [],
},
}
assert sort_rule_groups(sort_group_contents(grouped)) == OrderedDict((
(rules[1], OrderedDict((
(groups[1], []),
(groups[2], []),
))),
(rules[0], OrderedDict((
(groups[0], []),
))),
))
| bsd-3-clause | -1,414,423,788,055,724,000 | 26.402878 | 106 | 0.513521 | false |
abdinoor/Watson | watson/grammar.py | 1 | 5178 |
class Constant(object):
'''
A token object that represents a string literal. The literal may have a / in it to
denote multiple values
'''
def __init__(self, name):
self.values = name.split("/")
def match(self, word):
'''
Returns whether or not the word matches this constant's patterns
'''
return word in self.values
def __repr__(self):
return "/".join(self.values)
class Variable(object):
'''
A token object that represents a variable. All variables must contain < and >, and
can have a prefix and/or suffix
'''
def __init__(self, name):
i = name.find("<")
j = name.find(">")
inner = name[i + 1:j]
if not inner:
raise Exception
parts = inner.split("=")
if len(parts) == 1:
self.name = parts[0]
self.options = []
else:
# they put in some options we should save
self.name = parts[0]
self.options = parts[1].split("/")
self.prefix = name[:i]
self.postfix = name[j + 1:]
self.value = None
def match(self, word):
'''
Returns whether or not the given word matches this variable's pattern, and sets
its value if there is a match
'''
if word.startswith(self.prefix) and word.endswith(self.postfix):
value = word[len(self.prefix):len(word) - len(self.postfix)]
if not self.options or value in self.options:
self.value = value
return True
return False
def __repr__(self):
return self.prefix + "<" + self.name + ">" + self.postfix
def _create_options(string):
'''
Takes a syntax string and parses out all matching square brackets, and returns a list
of all combinations of syntaxes that could be formed if the brackets were there or not
ARGUMENTS
string - the syntax string
RETURNS
a list of syntax strings that match whether the optional pieces may be present or not
EXAMPLES
_create_options("cow") -> ["cow"]
_create_options("cow [or lamb]") -> ["cow","cow or lamb"]
_create_options("cow [or lamb[ada]]") -> ["cow","cow or lamb", "cow or lambada"]
'''
count = 0
had_counted = False
i = 0
options = []
for j in range(len(string)):
if string[j] == "[":
count += 1
if not had_counted:
i = j
had_counted = True
if string[j] == "]":
count -= 1
if not had_counted:
raise Exception
if count == 0 and had_counted:
if not options:
options = [_create_options(string[:i] + string[i + 1:j] + string[j + 1:]),
_create_options(string[:i] + string[j + 1:])]
options = [x for y in options for x in y]
if count != 0:
raise Exception
if not options:
options = [string]
return options
def _populate_results(grammar):
'''
Takes a grammar that has been matched, and returns the values of its variables
'''
result = dict()
for node in grammar:
if isinstance(node, Variable):
result[node.name] = node.value
return result
def _create_grammar(grammar_string):
'''
Creates a grammar construct from a string by tokenizing by whitespace, then creating
Constants and Variables for each token
'''
grammar = []
words = grammar_string.split()
for word in words:
if word.find("<") >= 0 or word.find(">") >= 0:
if not (word.count("<") == 1 and word.count(">") == 1 and word.find(">") > word.find("<")):
raise Exception
node = Variable(word)
else:
node = Constant(word)
grammar.append(node)
return grammar
def create_grammars(grammar_string):
'''
Creates a list of all possible grammar objects from a string that may contain
optional parts
'''
options = _create_options(grammar_string)
return [_create_grammar(option) for option in options]
def _match_grammar(string, grammar):
'''
Determines if a string is a match for a grammar construct
'''
words = string.split()
last = len(grammar) - 1
for i, node in enumerate(grammar):
if i > len(words) - 1:
return False
if i == last:
return _populate_results(grammar) if node.match(" ".join(words[i:])) else False
if not node.match(words[i]):
return False
def match_grammars(string, grammars):
'''
Takes a string and an iterable of grammars, and returns True if any of the grammars
are matched by the string
ARGUMENTS
string - the input string we're matching against
grammars - an iterable of grammars to check against
RETURNS
True if any of the grammars matched, False if not
'''
for grammar in grammars:
result = _match_grammar(string, grammar)
if result is not False:
return result
return False
| mit | 9,154,911,169,913,797,000 | 27.607735 | 103 | 0.562766 | false |
gcd0318/python-evdev | evdev/uinput.py | 4 | 6982 | # encoding: utf-8
import os
import stat
import time
from evdev import _uinput
from evdev import ecodes, util, device
class UInputError(Exception):
pass
class UInput(object):
'''
A userland input device and that can inject input events into the
linux input subsystem.
'''
__slots__ = (
'name', 'vendor', 'product', 'version', 'bustype',
'events', 'devnode', 'fd', 'device',
)
def __init__(self,
events=None,
name='py-evdev-uinput',
vendor=0x1, product=0x1, version=0x1, bustype=0x3,
devnode='/dev/uinput'):
'''
:param events: the event types and codes that the uinput
device will be able to inject - defaults to all
key codes.
:type events: dictionary of event types mapping to lists of
event codes.
:param name: the name of the input device.
:param vendor: vendor identifier.
:param product: product identifier.
:param version: version identifier.
:param bustype: bustype identifier.
.. note:: If you do not specify any events, the uinput device
will be able to inject only ``KEY_*`` and ``BTN_*``
event codes.
'''
self.name = name #: Uinput device name.
self.vendor = vendor #: Device vendor identifier.
self.product = product #: Device product identifier.
self.version = version #: Device version identifier.
self.bustype = bustype #: Device bustype - eg. ``BUS_USB``.
self.devnode = devnode #: Uinput device node - eg. ``/dev/uinput/``.
if not events:
events = {ecodes.EV_KEY: ecodes.keys.keys()}
# the min, max, fuzz and flat values for the absolute axis for
# a given code
absinfo = []
self._verify()
#: Write-only, non-blocking file descriptor to the uinput device node.
self.fd = _uinput.open(devnode)
# set device capabilities
for etype, codes in events.items():
for code in codes:
# handle max, min, fuzz, flat
if isinstance(code, (tuple, list, device.AbsInfo)):
# flatten (ABS_Y, (0, 255, 0, 0)) to (ABS_Y, 0, 255, 0, 0)
f = [code[0]]; f += code[1]
absinfo.append(f)
code = code[0]
#:todo: a lot of unnecessary packing/unpacking
_uinput.enable(self.fd, etype, code)
# create uinput device
_uinput.create(self.fd, name, vendor, product, version, bustype, absinfo)
#: An :class:`InputDevice <evdev.device.InputDevice>` instance
#: for the fake input device. ``None`` if the device cannot be
#: opened for reading and writing.
self.device = self._find_device()
def __enter__(self):
return self
def __exit__(self, type, value, tb):
if hasattr(self, 'fd'):
self.close()
def __repr__(self):
# :todo:
v = (repr(getattr(self, i)) for i in
('name', 'bustype', 'vendor', 'product', 'version'))
return '{}({})'.format(self.__class__.__name__, ', '.join(v))
def __str__(self):
msg = ('name "{}", bus "{}", vendor "{:04x}", product "{:04x}", version "{:04x}"\n'
'event types: {}')
evtypes = [i[0] for i in self.capabilities(True).keys()]
msg = msg.format(self.name, ecodes.BUS[self.bustype],
self.vendor, self.product,
self.version, ' '.join(evtypes))
return msg
def close(self):
# close the associated InputDevice, if it was previously opened
if self.device is not None:
self.device.close()
# destroy the uinput device
if self.fd > -1:
_uinput.close(self.fd)
self.fd = -1
def write_event(self, event):
'''
Inject an input event into the input subsystem. Events are
queued until a synchronization event is received.
:param event: InputEvent instance or an object with an
``event`` attribute (:class:`KeyEvent
<evdev.events.KeyEvent>`, :class:`RelEvent
<evdev.events.RelEvent>` etc).
Example::
ev = InputEvent(1334414993, 274296, ecodes.EV_KEY, ecodes.KEY_A, 1)
ui.write_event(ev)
'''
if hasattr(event, 'event'):
event = event.event
self.write(event.type, event.code, event.value)
def write(self, etype, code, value):
'''
Inject an input event into the input subsystem. Events are
queued until a synchronization event is received.
:param etype: event type (eg. ``EV_KEY``).
:param code: event code (eg. ``KEY_A``).
:param value: event value (eg. 0 1 2 - depends on event type).
Example::
ui.write(e.EV_KEY, e.KEY_A, 1) # key A - down
ui.write(e.EV_KEY, e.KEY_A, 0) # key A - up
'''
_uinput.write(self.fd, etype, code, value)
def syn(self):
'''
Inject a ``SYN_REPORT`` event into the input subsystem. Events
queued by :func:`write()` will be fired. If possible, events
will be merged into an 'atomic' event.
'''
_uinput.write(self.fd, ecodes.EV_SYN, ecodes.SYN_REPORT, 0)
def capabilities(self, verbose=False, absinfo=True):
'''See :func:`capabilities <evdev.device.InputDevice.capabilities>`.'''
if self.device is None:
raise UInputError('input device not opened - cannot read capabilites')
return self.device.capabilities(verbose, absinfo)
def _verify(self):
'''
Verify that an uinput device exists and is readable and writable
by the current process.
'''
try:
m = os.stat(self.devnode)[stat.ST_MODE]
if not stat.S_ISCHR(m):
raise
except (IndexError, OSError):
msg = '"{}" does not exist or is not a character device file '\
'- verify that the uinput module is loaded'
raise UInputError(msg.format(self.devnode))
if not os.access(self.devnode, os.W_OK):
msg = '"{}" cannot be opened for writing'
raise UInputError(msg.format(self.devnode))
if len(self.name) > _uinput.maxnamelen:
msg = 'uinput device name must not be longer than {} characters'
raise UInputError(msg.format(_uinput.maxnamelen))
def _find_device(self):
#:bug: the device node might not be immediately available
time.sleep(0.1)
for fn in util.list_devices('/dev/input/'):
d = device.InputDevice(fn)
if d.name == self.name:
return d
| bsd-3-clause | -7,001,426,001,989,759,000 | 32.567308 | 91 | 0.549413 | false |
mgit-at/ansible | test/units/modules/network/f5/test_bigip_config.py | 21 | 3285 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_config import Parameters
from library.modules.bigip_config import ModuleManager
from library.modules.bigip_config import ArgumentSpec
# In Ansible 2.8, Ansible changed import paths.
from test.units.compat import unittest
from test.units.compat.mock import Mock
from test.units.compat.mock import patch
from test.units.modules.utils import set_module_args
except ImportError:
from ansible.modules.network.f5.bigip_config import Parameters
from ansible.modules.network.f5.bigip_config import ModuleManager
from ansible.modules.network.f5.bigip_config import ArgumentSpec
# Ansible 2.8 imports
from units.compat import unittest
from units.compat.mock import Mock
from units.compat.mock import patch
from units.modules.utils import set_module_args
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
class TestParameters(unittest.TestCase):
def test_module_parameters(self):
args = dict(
save='yes',
reset='yes',
merge_content='asdasd',
verify='no',
server='localhost',
user='admin',
password='password'
)
p = Parameters(params=args)
assert p.save == 'yes'
assert p.reset == 'yes'
assert p.merge_content == 'asdasd'
class TestManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
def test_run_single_command(self, *args):
set_module_args(dict(
save='yes',
reset='yes',
merge_content='asdasd',
verify='no',
server='localhost',
user='admin',
password='password'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
mm = ModuleManager(module=module)
# Override methods to force specific logic in the module to happen
mm.exit_json = Mock(return_value=True)
mm.reset_device = Mock(return_value='reset output')
mm.upload_to_device = Mock(return_value=True)
mm.move_on_device = Mock(return_value=True)
mm.merge_on_device = Mock(return_value='merge output')
mm.remove_temporary_file = Mock(return_value=True)
mm.save_on_device = Mock(return_value='save output')
results = mm.exec_module()
assert results['changed'] is True
| gpl-3.0 | -7,887,804,610,251,092,000 | 27.565217 | 91 | 0.641705 | false |
olifre/root | tutorials/pyroot/first.py | 28 | 1676 | ## \file
## \ingroup tutorial_pyroot
## \notebook
## My first PyROOT interactive session
##
## \macro_image
## \macro_code
##
## \author Wim Lavrijsen
from ROOT import TCanvas, TF1, TPaveLabel, TPad, TText
from ROOT import gROOT
nut = TCanvas( 'nut', 'FirstSession', 100, 10, 700, 900 )
nut.Range( 0, 0, 20, 24 )
nut.SetFillColor( 10 )
nut.SetBorderSize( 2 )
pl = TPaveLabel( 3, 22, 17, 23.7, 'My first PyROOT interactive session', 'br' )
pl.SetFillColor( 18 )
pl.Draw()
t = TText( 0, 0, 'a' )
t.SetTextFont( 62 )
t.SetTextSize( 0.025 )
t.SetTextAlign( 12 )
t.DrawText( 2, 20.3, 'PyROOT provides ROOT bindings for Python, a powerful interpreter.' )
t.DrawText( 2, 19.3, 'Blocks of lines can be entered typographically.' )
t.DrawText( 2, 18.3, 'Previous typed lines can be recalled.' )
t.SetTextFont( 72 )
t.SetTextSize( 0.026 )
t.DrawText( 3, 17, r'>>> x, y = 5, 7' )
t.DrawText( 3, 16, r'>>> import math; x*math.sqrt(y)' )
t.DrawText( 3, 14, r'>>> for i in range(2,7): print "sqrt(%d) = %f" % (i,math.sqrt(i))' )
t.DrawText( 3, 10, r'>>> import ROOT; f1 = ROOT.TF1( "f1", "sin(x)/x", 0, 10 )' )
t.DrawText( 3, 9, r'>>> f1.Draw()' )
t.SetTextFont( 81 )
t.SetTextSize( 0.018 )
t.DrawText( 4, 15, '13.228756555322953' )
t.DrawText( 4, 13.3, 'sqrt(2) = 1.414214' )
t.DrawText( 4, 12.7, 'sqrt(3) = 1.732051' )
t.DrawText( 4, 12.1, 'sqrt(4) = 2.000000' )
t.DrawText( 4, 11.5, 'sqrt(5) = 2.236068' )
t.DrawText( 4, 10.9, 'sqrt(6) = 2.449490' )
pad = TPad( 'pad', 'pad', .2, .05, .8, .35 )
pad.SetFillColor( 42 )
pad.SetFrameFillColor( 33 )
pad.SetBorderSize( 10 )
pad.Draw()
pad.cd()
pad.SetGrid()
f1 = TF1( 'f1', 'sin(x)/x', 0, 10 )
f1.Draw()
nut.cd()
nut.Update()
| lgpl-2.1 | 310,565,348,949,317,100 | 27.40678 | 90 | 0.623508 | false |
bmya/odoo-infrastructure | infrastructure/models/server_hostname.py | 7 | 4232 | # -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
from openerp import models, fields, api, _
from openerp.exceptions import Warning
from fabtools import require
import os
class server_hostname(models.Model):
""""""
_name = 'infrastructure.server_hostname'
_description = 'server_hostname'
_order = 'sequence'
_sql_constraints = [
('name_uniq', 'unique(name, wildcard, server_id)',
'Hostname/wildcard must be unique per server!'),
]
sequence = fields.Integer(
'Sequence',
default=10,
)
name = fields.Char(
string='Name',
required=True
)
wildcard = fields.Boolean(
string='Wild Card'
)
domain_regex = fields.Char(
string='Domain Regex',
required=True,
)
server_id = fields.Many2one(
'infrastructure.server',
string='Server',
ondelete='cascade',
required=True
)
partner_id = fields.Many2one(
'res.partner',
'Partner',
help='If partner is set, then this hostname will be only availble\
for this partner databases and instances'
)
ssl_available = fields.Boolean(
string='SSL Available?',
)
ssl_intermediate_certificate = fields.Text(
string='SSL Intermediate Certificate',
)
ssl_certificate = fields.Text(
string='SSL Certificate',
)
ssl_certificate_key = fields.Text(
string='SSL Certificate KEY',
)
ssl_certificate_path = fields.Char(
string='SSL Certificate',
compute='get_certificate_paths'
)
ssl_certificate_key_path = fields.Char(
string='SSL Certificate',
compute='get_certificate_paths'
)
@api.one
@api.depends('name')
def get_certificate_paths(self):
name = self.name
if self.wildcard:
name += '_wildcard'
base_file_path = os.path.join(self.server_id.ssl_path, name)
self.ssl_certificate_path = base_file_path + '.crt'
self.ssl_certificate_key_path = base_file_path + '.key'
@api.onchange('wildcard', 'name')
def _get_domain_regex(self):
domain_regex = False
if self.name:
if self.wildcard:
domain_regex = '/[@.]' + '\\.'.join(
self.name.split('.')) + '$/'
# "/[@.]domain\.com\.ar$/"
else:
domain_regex = '/(.*)' + '\\.'.join(
self.name.split('.')) + '$/'
# "/(.*)tuukan\.com$/"
self.domain_regex = domain_regex
def name_get(self, cr, uid, ids, context=None):
if not ids:
return []
if isinstance(ids, (int, long)):
ids = [ids]
reads = self.read(cr, uid, ids, ['name', 'wildcard'], context=context)
res = []
for record in reads:
name = record['name']
if record['wildcard']:
name += _(' - Wildcard')
res.append((record['id'], name))
return res
@api.one
def load_ssl_certficiate(self):
self.server_id.get_env()
if not self.ssl_available:
return False
if not self.ssl_certificate or not self.ssl_certificate_key:
raise Warning(_(
'To configure SSL you need to set ssl certificates and keys'))
# TODO add ssl path in server data
certificate = self.ssl_certificate
if self.ssl_intermediate_certificate:
certificate += ('\n%s') % (self.ssl_intermediate_certificate)
require.files.directory(
self.server_id.ssl_path, use_sudo=True,
owner='', group='', mode='600')
require.file(
path=self.ssl_certificate_path,
contents=certificate,
use_sudo=True)
require.file(
path=self.ssl_certificate_key_path,
contents=self.ssl_certificate_key,
use_sudo=True)
| agpl-3.0 | 5,929,500,091,521,291,000 | 31.060606 | 78 | 0.526229 | false |
VirrageS/io-kawiarnie | caffe/cash/migrations/0008_auto_20160615_1957.py | 1 | 1422 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-06-15 17:57
from __future__ import unicode_literals
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('caffe', '0001_initial'),
('cash', '0007_auto_20160526_1707'),
]
operations = [
migrations.AddField(
model_name='cashreport',
name='caffe',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='caffe.Caffe'),
),
migrations.AddField(
model_name='company',
name='caffe',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='caffe.Caffe'),
),
migrations.AddField(
model_name='expense',
name='caffe',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='caffe.Caffe'),
),
migrations.AddField(
model_name='fullexpense',
name='caffe',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='caffe.Caffe'),
),
migrations.AlterField(
model_name='company',
name='name',
field=models.CharField(max_length=200),
),
]
| mit | 2,469,195,989,864,711,700 | 32.857143 | 124 | 0.592827 | false |
tensorflow/neural-structured-learning | research/a2n/utils.py | 1 | 3047 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for project A2N."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import tensorflow as tf
def combine_dict(init_dict, add_dict):
"""Add add_dict to init_dict and return init_dict."""
for k, v in add_dict.iteritems():
init_dict[k] = v
return init_dict
def add_variable_summaries(var, var_name_scope):
"""Attach a lot of summaries to a Tensor (for TensorBoard visualization)."""
with tf.name_scope('summaries/' + var_name_scope):
mean = tf.reduce_mean(var)
stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
var_max = tf.reduce_max(var)
var_min = tf.reduce_min(var)
tf.summary.scalar('mean', mean)
tf.summary.scalar('stddev', stddev)
tf.summary.scalar('max', var_max)
tf.summary.scalar('min', var_min)
tf.summary.histogram('histogram', var)
def add_histogram_summary(var, var_name_scope):
"""Just adds a histogram summary for the variable."""
with tf.name_scope('summaries/' + var_name_scope):
tf.summary.histogram('histogram', var)
def read_entity_name_mapping(entity_names_file):
"""Read mapping from entity mid to names."""
entity_names = {}
with open(entity_names_file) as gf:
if entity_names_file.endswith('.gz'):
f = gzip.GzipFile(fileobj=gf)
else:
f = gf
for line in f:
contents = line.strip().split('\t')
if len(contents) < 2:
continue
# mid, name = contents
mid = contents[0]
name = contents[1]
entity_names['/' + mid] = name
return entity_names
def save_embedding_vocabs(output_dir, graph, entity_names_file=None):
"""Save entity and relation vocabs to file."""
# Read entity names
entity_names = None
if entity_names_file:
entity_names = read_entity_name_mapping(entity_names_file)
# Save entity vocab
with open(output_dir + '/entity_vocab.tsv', 'w+') as f:
for i in range(graph.ent_vocab_size):
name = graph.inverse_entity_vocab[i]
if entity_names and name in entity_names:
name += '/' + entity_names[name]
f.write(name + '\n')
with open(output_dir + '/relation_vocab.tsv', 'w+') as f:
for i in range(graph.rel_vocab_size):
f.write(graph.inverse_relation_vocab[i] + '\n')
if hasattr(graph, 'vocab'):
with open(output_dir + '/word_vocab.tsv', 'w+') as f:
for i in range(graph.word_vocab_size):
f.write(graph.inverse_word_vocab[i] + '\n')
| apache-2.0 | -5,664,629,057,037,464,000 | 33.235955 | 78 | 0.67148 | false |
dennishuo/dataproc-initialization-actions | rapids/test_rapids.py | 1 | 1795 | import os
import unittest
from parameterized import parameterized
from integration_tests.dataproc_test_case import DataprocTestCase
class RapidsTestCase(DataprocTestCase):
COMPONENT = 'rapids'
INIT_ACTIONS = ['rapids/rapids.sh']
TEST_SCRIPT_FILE_NAME = 'verify_rapids.py'
def verify_instance(self, name):
self.upload_test_file(
os.path.join(os.path.dirname(os.path.abspath(__file__)),
self.TEST_SCRIPT_FILE_NAME), name)
self.__run_test_script(name)
self.remove_test_script(self.TEST_SCRIPT_FILE_NAME, name)
def __run_test_script(self, name):
verify_cmd = "/opt/conda/anaconda/envs/RAPIDS/bin/python {}".format(
self.TEST_SCRIPT_FILE_NAME)
self.assert_instance_command(name, verify_cmd)
@parameterized.expand(
[("STANDARD", "1.3", ["m"])],
testcase_func_name=DataprocTestCase.generate_verbose_test_name)
def test_rapids(self, configuration, dataproc_version, machine_suffixes):
metadata = 'INIT_ACTIONS_REPO={}'.format(self.INIT_ACTIONS_REPO)
self.createCluster(configuration,
self.INIT_ACTIONS,
dataproc_version,
metadata=metadata,
beta=True,
master_accelerator='type=nvidia-tesla-p100',
worker_accelerator='type=nvidia-tesla-p100',
optional_components='ANACONDA',
timeout_in_minutes=20)
for machine_suffix in machine_suffixes:
self.verify_instance("{}-{}".format(self.getClusterName(),
machine_suffix))
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -2,271,468,749,975,615,500 | 36.395833 | 77 | 0.577159 | false |
hephaestus9/Ironworks | modules_lib/xbmc/recently_added.py | 1 | 2688 | # -*- coding: utf-8 -*-
from flask import render_template
import jsonrpclib, ast, os
@app.route('/recently_added_episodes/<server_id>')
def recently_added_episodes(server_id):
return render_recently_added_episodes(server_id)
@app.route('/recently_added_movies/<server_id>')
def recently_added_movies(server_id):
return render_recently_added_movies(server_id)
@app.route('/recently_added_albums/<server_id>')
def xhr_recently_added_albums(server_id):
return render_recently_added_albums(server_id)
def render_recently_added_episodes(server_id):
xbmc = jsonrpclib.Server(get_recent_xbmc_api_url('recently_added_server'))
recently_added_episodes = get_recently_added_episodes(xbmc, server)
return render_template('recently_added/tv.html',
recently_added_episodes = recently_added_episodes)
def render_recently_added_movies(server_id):
xbmc = jsonrpclib.Server(get_recent_xbmc_api_url('recently_added_movies_server'))
recently_added_movies = get_recently_added_movies(xbmc, server)
return render_template('recently_added/movies.html',
recently_added_movies = recently_added_movies)
def render_recently_added_albums(server_id):
xbmc = jsonrpclib.Server(get_recent_xbmc_api_url('recently_added_albums_server'))
recently_added_albums = get_recently_added_albums(xbmc, server)
return render_template('recently_added/albums.html',
recently_added_albums = recently_added_albums)
def get_recently_added_episodes(xbmc, server):
num_recent_videos = 28
try:
recently_added_episodes = xbmc.VideoLibrary.GetRecentlyAddedEpisodes(properties = ['title', 'season', 'episode', 'showtitle', 'playcount', 'thumbnail', 'tvshowid'])['episodes']
except:
recently_added_episodes = []
return recently_added_episodes
def get_recently_added_movies(xbmc, server):
num_recent_videos = 28
try:
recently_added_movies = xbmc.VideoLibrary.GetRecentlyAddedMovies(properties = ['title', 'year', 'rating', 'playcount', 'thumbnail'])['movies']
except:
recently_added_movies = []
return recently_added_movies
def get_recently_added_albums(xbmc, server):
num_recent_albums = 28
try:
recently_added_albums = xbmc.AudioLibrary.GetRecentlyAddedAlbums(properties = ['title', 'year', 'rating', 'artist', 'thumbnail'])['albums']
for album in recently_added_albums:
if 'artist' in album and isinstance(album['artist'], list): #Frodo
album['artist'] = " / ".join(album['artist'])
except:
recently_added_albums = []
return recently_added_albums
| mit | 2,522,911,749,063,268,000 | 32.6 | 184 | 0.685268 | false |
gklyne/annalist | src/annalist_root/annalist/tests/entity_testlistdata.py | 1 | 14216 | """
Utility functions to support entity data testing
"""
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2014, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import logging
log = logging.getLogger(__name__)
import os
from utils.py3porting import urljoin
from django.conf import settings
from django.http import QueryDict
from django.utils.http import urlquote, urlunquote
from django.core.urlresolvers import resolve, reverse
from annalist.util import valid_id
from annalist.identifiers import RDF, RDFS, ANNAL
from annalist import layout
from annalist.views.form_utils.fieldchoice import FieldChoice
from annalist.views.fields.render_placement import (
get_placement_classes
)
from .tests import (
TestHost, TestHostUri, TestBasePath, TestBaseUri, TestBaseDir
)
from .entity_testutils import (
collection_dir,
site_title,
collection_entity_view_url,
context_field_row
)
from .entity_testentitydata import entitydata_list_type_url
from .entity_testfielddesc import get_field_description, get_bound_field
from .entity_testtypedata import recordtype_url
from .entity_testsitedata import (
make_field_choices, no_selection,
get_site_types, get_site_types_sorted, get_site_types_linked,
get_site_lists, get_site_lists_sorted, get_site_lists_linked,
get_site_views, get_site_views_sorted, get_site_views_linked,
get_site_list_types, get_site_list_types_sorted, get_site_list_types_linked,
get_site_field_groups, get_site_field_groups_sorted,
get_site_fields, get_site_fields_sorted,
get_site_field_types, get_site_field_types_sorted,
)
# -----------------------------------------------------------------------------
#
# Constants
#
# -----------------------------------------------------------------------------
# Defined here to facilitate test suite changes
num_testcoll_enumerate_all_entities = 187 # Entities defined
num_testcoll_all_entities_scope_all = 184 # Entities listed
num_testcoll_inherit_entities_scope_all = 238 # Entities listed, including inherited
# -----------------------------------------------------------------------------
#
# Directory generating functions
#
# -----------------------------------------------------------------------------
def recordlist_dir(coll_id="testcoll", list_id="testlist"):
return collection_dir(coll_id) + layout.COLL_LIST_PATH%{'id': list_id} + "/"
# -----------------------------------------------------------------------------
#
# URI generating functions
#
# -----------------------------------------------------------------------------
def recordlist_coll_url(site, coll_id="testcoll", list_id="testlist"):
return urljoin(
site._entityurl,
layout.SITE_COLL_PATH%{'id': coll_id} + "/" +
layout.COLL_LIST_PATH%{'id': list_id} + "/"
)
def recordlist_url(coll_id, list_id):
"""
URI for record list description data; also view using default entity view
"""
if not valid_id(list_id):
list_id = "___"
return collection_entity_view_url(coll_id=coll_id, type_id="_list", entity_id=list_id)
def recordlist_edit_url(action=None, coll_id=None, list_id=None):
"""
URI for record list description editing view
"""
viewname = (
'AnnalistEntityDataView' if action == "view" else
'AnnalistEntityNewView' if action == "new" else
'AnnalistEntityEditView' if action == "copy" else
'AnnalistEntityEditView' if action == "edit" else
'AnnalistRecordListDeleteView' if action == "delete" else
'unknown'
)
kwargs = {'coll_id': coll_id}
if action != "delete":
kwargs.update({'action': action, 'type_id': "_list", 'view_id': "List_view"})
if list_id:
if valid_id(list_id):
kwargs.update({'entity_id': list_id})
else:
kwargs.update({'entity_id': "___"})
return reverse(viewname, kwargs=kwargs)
# -----------------------------------------------------------------------------
#
# ----- RecordList data
#
# -----------------------------------------------------------------------------
def recordlist_value_keys(list_uri=False):
keys = set(
[ 'annal:id', 'annal:type_id'
, 'annal:type'
, 'annal:url'
, 'rdfs:label', 'rdfs:comment'
, 'annal:display_type'
, 'annal:list_entity_selector'
, 'annal:default_view'
, 'annal:default_type'
, 'annal:list_fields'
])
if list_uri:
keys.add('annal:uri')
return keys
def recordlist_load_keys(list_uri=False):
return recordlist_value_keys(list_uri=list_uri) | {'@id', '@type', '@context'}
def recordlist_create_values(
coll_id="testcoll", list_id="testlist", list_uri=None, update="RecordList"):
"""
Entity values used when creating a record list entity
"""
d = (
{ 'annal:type': "annal:List"
, 'rdfs:label': "%s %s/%s"%(update, coll_id, list_id)
, 'rdfs:comment': "%s help for %s/%s"%(update, coll_id, list_id)
, "annal:display_type": "_enum_list_type/List"
, "annal:default_view": "_view/Default_view"
, "annal:default_type": "_type/Default_type"
# , "annal:list_entity_type": None
, "annal:list_entity_selector": "ALL"
, "annal:list_fields":
[ { "annal:field_id": layout.FIELD_TYPEID+"/Entity_id"
, "annal:field_placement": "small:0,3"
}
, { "annal:field_id": layout.FIELD_TYPEID+"/Entity_label"
, "annal:field_placement": "small:3,9"
}
]
})
if list_uri:
d['annal:uri'] = list_uri
return d
def recordlist_values(
coll_id="testcoll", list_id="testlist", list_uri=None,
update="RecordList", hosturi=TestHostUri):
list_url = recordlist_url(coll_id, list_id)
d = recordlist_create_values(coll_id, list_id, list_uri=list_uri, update=update).copy()
d.update(
{ 'annal:id': list_id
, 'annal:type_id': "_list"
, 'annal:url': list_url
})
return d
def recordlist_read_values(
coll_id="testcoll", list_id="testlist",
update="RecordList", hosturi=TestHostUri):
d = recordlist_values(coll_id, list_id, update=update, hosturi=hosturi).copy()
d.update(
{ '@id': layout.COLL_BASE_LIST_REF%{'id': list_id}
, '@type': ["annal:List"]
, '@context': [{"@base": "../../"}, "../../coll_context.jsonld"]
})
return d
# -----------------------------------------------------------------------------
#
# ----- Data in recordlist view for list description data
#
# -----------------------------------------------------------------------------
def list_view_context_data(
coll_id="testcoll", list_id=None, orig_id=None,
action=None,
list_uri=None,
list_type="List",
list_label=None,
list_descr=None,
list_default_type="_type/Default_type", type_choices=None,
list_default_view="_type/Default_view", view_choices=None,
list_entity_selector="ALL",
list_entity_type="",
list_fields=None,
num_fields=0,
update="RecordList",
continuation_url=None
):
if list_label is None:
if list_id:
#@@TODO: use same format as no list_id; change form data too
list_label = "%s %s/%s"%(update, coll_id, list_id)
else:
list_label = "%s list (%s/)"%(update, coll_id)
if list_fields is None:
if num_fields == 2:
list_fields = (
[ { "annal:field_id": layout.FIELD_TYPEID+"/Entity_id"
, "annal:field_placement": "small:0,3"
}
, { "annal:field_id": layout.FIELD_TYPEID+"/Entity_label"
, "annal:field_placement": "small:3,9"
}
])
if num_fields == 3:
list_fields = (
[ { "annal:field_id": layout.FIELD_TYPEID+"/Entity_id"
, "annal:field_placement": "small:0,3"
}
, { "annal:field_id": layout.FIELD_TYPEID+"/Entity_type"
, "annal:field_placement": "small:3,3"
}
, { "annal:field_id": layout.FIELD_TYPEID+"/Entity_label"
, "annal:field_placement": "small:6,6"
}
])
list_type_choices = get_site_list_types_linked("testcoll")
if type_choices is None:
type_choices = (
[ FieldChoice("", label="(default entity type)")] +
get_site_types_linked("testcoll") +
[ FieldChoice("_type/testtype",
label="RecordType testcoll/_type/testtype",
link=recordtype_url("testcoll", "testtype")
)]
)
if view_choices is None:
view_choices = (
[ FieldChoice("", label="(view id)") ] +
get_site_views_linked("testcoll")
)
if continuation_url is None:
continuation_url = entitydata_list_type_url(coll_id, layout.LIST_TYPEID)
view_label = "List definition"
view_title = (
"%s - %s - Collection %s"%(list_label, view_label, coll_id) if list_label
else
"%s - Collection %s"%(view_label, coll_id)
)
# Target record fields listed in the view description
context_dict = (
{ 'title': view_title
, 'heading': view_label
, 'coll_id': coll_id
, 'type_id': layout.LIST_TYPEID
, 'view_id': 'List_view'
, 'entity_id': list_id or ""
, 'orig_id': orig_id
, 'orig_type': layout.LIST_TYPEID
, 'record_type': "annal:List"
, 'continuation_url': continuation_url
, 'fields':
[ context_field_row(
get_bound_field("List_id", list_id), # 0 (0,0)
get_bound_field("List_type", list_type, # 1 (0,1)
options=list_type_choices),
)
, context_field_row(
get_bound_field("List_label", list_label) # 2 (1,0)
)
, context_field_row(
get_bound_field("List_comment", list_descr) # 3 (2,0)
)
, context_field_row(
get_bound_field("List_default_type", list_default_type, # 4 (3,0)
options=type_choices),
get_bound_field("List_default_view", list_default_view, # 5 (3,1)
options=view_choices),
)
, context_field_row(
get_bound_field("List_entity_selector", list_entity_selector) # 6 (4,0)
)
, context_field_row(
get_bound_field("List_entity_type", list_entity_type) # 7 (5,0)
)
, get_bound_field("List_fields", list_fields) # 8 (6, 0)
]
})
if action:
context_dict['action'] = action
if list_uri:
context_dict['entity_uri'] = list_uri
return context_dict
def list_view_form_data(
coll_id="testcoll", orig_coll=None,
list_id="", orig_id=None,
action=None, cancel=None, task=None,
update="RecordView"):
form_data_dict = (
{ "List_type": "_enum_list_type/List"
, "List_label": "%s list (%s/%s)"%(update, coll_id, list_id)
, "List_comment": "%s help (%s/%s)"%(update, coll_id, list_id)
, "List_default_type": "_type/Default_type"
, "List_default_view": "_view/Default_view"
, "List_entity_selector": "ALL"
# List repeating fields
, "List_fields__0__Field_id": layout.FIELD_TYPEID+"/Entity_id"
, "List_fields__0__Field_placement": "small:0,3"
, "List_fields__1__Field_id": layout.FIELD_TYPEID+"/Entity_label"
, "List_fields__1__Field_placement": "small:3,9"
# Hidden fields
, "action": action
, "view_id": "List_view"
, "orig_id": "orig_list_id"
, "orig_type": "_list"
, "orig_coll": coll_id
, "continuation_url": entitydata_list_type_url(coll_id, "_list")
})
if list_id is not None:
form_data_dict['entity_id'] = list_id
if list_id:
form_data_dict['entity_id'] = list_id
form_data_dict['orig_id'] = list_id
form_data_dict['List_label'] = "%s %s/%s"%(update, coll_id, list_id)
form_data_dict['List_comment'] = "%s help for %s/%s"%(update, coll_id, list_id)
if orig_id:
form_data_dict['orig_id'] = orig_id
if orig_coll:
form_data_dict['orig_coll'] = orig_coll
if action:
form_data_dict['action'] = action
if cancel:
form_data_dict['cancel'] = "Cancel"
elif task:
form_data_dict[task] = task
else:
form_data_dict['save'] = "Save"
return form_data_dict
# -----------------------------------------------------------------------------
#
# ----- Recordview delete confirmation form data
#
# -----------------------------------------------------------------------------
def recordlist_delete_confirm_form_data(list_id=None):
return (
{ 'listlist': list_id,
'list_delete': 'Delete'
})
# End.
| mit | -1,469,320,164,280,331,500 | 37.215054 | 91 | 0.498382 | false |
gaste/dwasp | tests/wasp1/AllAnswerSets/checker_14__backtracking_model_checks___short_version.test.py | 3 | 3871 | input = """
a v b :- not c.
b :- d, not c.
e v a :- f.
g v d :- h.
i v g :- j.
e v a v k :- l.
l v e v a :- l, i.
j v not_i :- g.
c v m :- i.
h :- k.
n v i v e.
o v h.
o v p.
q v o v f.
q v n v c.
not_h v not_c.
g v r.
not_e :- not e.
not_a :- not a.
not_q :- not q.
not_b :- not b.
:- i, not_i.
:- c, not_c.
"""
output = """
{a, b, d, f, h, n, not_c, not_e, not_q, p, r}
{a, b, d, f, h, n, not_e, not_h, not_q, p, r}
{a, c, d, f, h, i, not_b, not_e, not_h, not_q, p, r}
{a, c, f, g, h, i, j, not_b, not_e, not_h, not_q, p}
{a, e, g, h, j, not_b, not_c, p, q}
{a, e, g, h, j, not_b, not_h, p, q}
{a, e, g, h, not_b, not_c, not_i, p, q}
{a, e, g, h, not_b, not_h, not_i, p, q}
{a, e, g, j, not_b, not_c, o, q}
{a, e, g, j, not_b, not_h, o, q}
{a, e, g, not_b, not_c, not_i, o, q}
{a, e, g, not_b, not_h, not_i, o, q}
{a, e, not_b, not_c, o, q, r}
{a, e, not_b, not_h, o, q, r}
{a, f, g, h, j, n, not_b, not_c, not_e, not_q, p}
{a, f, g, h, j, n, not_b, not_e, not_h, not_q, p}
{a, f, g, h, n, not_b, not_c, not_e, not_i, not_q, p}
{a, f, g, h, n, not_b, not_e, not_h, not_i, not_q, p}
{a, g, h, i, j, m, not_b, not_c, not_e, p, q}
{a, g, h, i, j, m, not_b, not_e, not_h, p, q}
{a, g, h, j, n, not_b, not_c, not_e, p, q}
{a, g, h, j, n, not_b, not_e, not_h, p, q}
{a, g, h, n, not_b, not_c, not_e, not_i, p, q}
{a, g, h, n, not_b, not_e, not_h, not_i, p, q}
{a, g, i, j, m, not_b, not_c, not_e, o, q}
{a, g, i, j, m, not_b, not_e, not_h, o, q}
{a, g, j, n, not_b, not_c, not_e, not_q, o}
{a, g, j, n, not_b, not_e, not_h, not_q, o}
{a, g, n, not_b, not_c, not_e, not_i, not_q, o}
{a, g, n, not_b, not_e, not_h, not_i, not_q, o}
{a, i, m, not_b, not_c, not_e, o, q, r}
{a, i, m, not_b, not_e, not_h, o, q, r}
{a, n, not_b, not_c, not_e, not_q, o, r}
{a, n, not_b, not_e, not_h, not_q, o, r}
{b, d, e, f, h, n, not_a, not_c, not_q, p, r}
{b, d, e, f, h, n, not_a, not_h, not_q, p, r}
{b, d, e, h, not_a, not_c, p, q, r}
{b, d, e, h, not_a, not_h, p, q, r}
{b, d, h, i, m, not_a, not_c, not_e, p, q, r}
{b, d, h, i, m, not_a, not_e, not_h, p, q, r}
{b, d, h, n, not_a, not_c, not_e, p, q, r}
{b, d, h, n, not_a, not_e, not_h, p, q, r}
{b, e, f, g, h, j, n, not_a, not_c, not_q, p}
{b, e, f, g, h, j, n, not_a, not_h, not_q, p}
{b, e, f, g, h, n, not_a, not_c, not_i, not_q, p}
{b, e, f, g, h, n, not_a, not_h, not_i, not_q, p}
{b, e, g, h, j, not_a, not_c, p, q}
{b, e, g, h, j, not_a, not_h, p, q}
{b, e, g, h, not_a, not_c, not_i, p, q}
{b, e, g, h, not_a, not_h, not_i, p, q}
{b, e, g, j, not_a, not_c, o, q}
{b, e, g, j, not_a, not_h, o, q}
{b, e, g, not_a, not_c, not_i, o, q}
{b, e, g, not_a, not_h, not_i, o, q}
{b, e, not_a, not_c, o, q, r}
{b, e, not_a, not_h, o, q, r}
{b, g, h, i, j, m, not_a, not_c, not_e, p, q}
{b, g, h, i, j, m, not_a, not_e, not_h, p, q}
{b, g, h, j, n, not_a, not_c, not_e, p, q}
{b, g, h, j, n, not_a, not_e, not_h, p, q}
{b, g, h, n, not_a, not_c, not_e, not_i, p, q}
{b, g, h, n, not_a, not_e, not_h, not_i, p, q}
{b, g, i, j, m, not_a, not_c, not_e, o, q}
{b, g, i, j, m, not_a, not_e, not_h, o, q}
{b, g, j, n, not_a, not_c, not_e, not_q, o}
{b, g, j, n, not_a, not_e, not_h, not_q, o}
{b, g, n, not_a, not_c, not_e, not_i, not_q, o}
{b, g, n, not_a, not_e, not_h, not_i, not_q, o}
{b, i, m, not_a, not_c, not_e, o, q, r}
{b, i, m, not_a, not_e, not_h, o, q, r}
{b, n, not_a, not_c, not_e, not_q, o, r}
{b, n, not_a, not_e, not_h, not_q, o, r}
{c, d, e, f, h, not_a, not_b, not_h, not_q, p, r}
{c, d, h, i, not_a, not_b, not_e, not_h, p, q, r}
{c, e, f, g, h, j, not_a, not_b, not_h, not_q, p}
{c, e, f, g, h, not_a, not_b, not_h, not_i, not_q, p}
{c, e, g, j, not_a, not_b, not_h, not_q, o}
{c, e, g, not_a, not_b, not_h, not_i, not_q, o}
{c, e, not_a, not_b, not_h, not_q, o, r}
{c, g, h, i, j, not_a, not_b, not_e, not_h, p, q}
{c, g, i, j, not_a, not_b, not_e, not_h, not_q, o}
{c, i, not_a, not_b, not_e, not_h, not_q, o, r}
"""
| apache-2.0 | 9,165,852,336,120,066,000 | 33.5625 | 53 | 0.451821 | false |
hozn/coilmq | coilmq/auth/__init__.py | 3 | 1193 | """
Authentication providers.
Because authentication providers are instantiated and configured in the application scope
(and not in the request handler), the authenticator implementations must be thread-safe.
"""
import abc
__authors__ = ['"Hans Lellelid" <[email protected]>']
__copyright__ = "Copyright 2009 Hans Lellelid"
__license__ = """Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
class Authenticator(object):
""" Abstract base class for authenticators. """
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def authenticate(self, login, passcode):
"""
Authenticate the login and passcode.
@return: Whether user is authenticated.
@rtype: C{bool}
"""
| apache-2.0 | 2,274,441,923,661,447,700 | 34.088235 | 89 | 0.728416 | false |
haxsaw/actuator | src/tests/config_tests.py | 1 | 39940 | #
# Copyright (c) 2014 Tom Carroll
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
'''
Created on 13 Jul 2014
'''
import threading
from actuator import *
from actuator.config import _Dependency, _ConfigTask, StructuralTask,\
with_config_options
from actuator.infra import IPAddressable
MyConfig = None
search_path = ["p1", "p2", "p3"]
def setup():
global MyConfig
class MyTestConfig(ConfigModel):
with_searchpath(*search_path)
t1 = NullTask("nt")
t2 = NullTask("temp")
with_dependencies(t1 | t2)
MyConfig = MyTestConfig
def make_dep_tuple_set(config):
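    # flatten the model's class-level dependencies into a set of
    # (from_task_path, to_task_path) tuples so the tests can compare
    # dependency graphs by simple set equality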
return set([(d.from_task.path, d.to_task.path) for d in config.get_class_dependencies()])
def pretty_deps(deps):
return [("{}-{}".format(d.from_task.name, str(id(d.from_task))[-4:]),
"{}-{}".format(d.to_task.name, str(id(d.to_task))[-4:]))
for d in deps]
def test01():
assert MyConfig
def test02():
expected_path = set(search_path)
assert expected_path == set(MyConfig.__searchpath__)
def test03():
assert 1 == len(MyConfig.__dependencies__)
def test04():
try:
class T4Config(ConfigModel):
t1 = NullTask("nt")
with_dependencies(t1 | "other")
raise Exception("Failed to catch dependency creation with non-task")
except:
assert True
def test05():
try:
_ = _Dependency(NullTask("nt"), "other")
raise Exception("Failed to catch _Dependency creation with 'to' as non-task")
except:
assert True
def test06():
try:
_ = _Dependency("other", NullTask("nt"))
raise Exception("Failed to catch _Dependency creation with 'from' as non-task")
except:
assert True
def test07():
assert 2 == len(MyConfig._node_dict)
def test08():
try:
class TC8(ConfigModel):
t1 = NullTask("nt")
t2 = NullTask("nt")
t3 = NullTask("nt")
with_dependencies(t1 | t2,
t2 | t3,
t3 | t1)
assert False, "Cycle in dependencies was not detected"
except ConfigException, _:
assert True
def test09():
class TC9(ConfigModel):
t1 = NullTask("t1", path="t1")
t2 = NullTask("t2", path="t2")
t3 = NullTask("t3", path="t3")
with_dependencies(t1 | t2 | t3)
assert make_dep_tuple_set(TC9) == set([("t1", "t2"), ("t2", "t3")])
def test10():
try:
class TC10(ConfigModel):
t1 = NullTask("t1", path="t1")
t2 = NullTask("t2", path="t2")
t3 = NullTask("t3", path="t3")
with_dependencies(t1 | t2 | t3 | t1)
assert False, "Cycle in dependencies was not detected"
except ConfigException, _:
assert True
def test10a():
try:
class TC10a(ConfigModel):
t1 = NullTask("t1", path="t1")
t2 = NullTask("t2", path="t2")
t3 = NullTask("t3", path="t3")
with_dependencies(t1 | t2 | t1)
assert False, "Cycle in dependencies was not detected"
except ConfigException, _:
assert True
def test11():
try:
class TC11(ConfigModel):
t1 = NullTask("t1", path="t1")
t2 = NullTask("t2", path="t2")
t3 = NullTask("t3", path="t3")
t4 = NullTask("t4", path="t4")
t5 = NullTask("t5", path="t5")
with_dependencies(t1 | t2 | t3 | t4)
with_dependencies(t3 | t4 | t5)
with_dependencies(t4 | t2)
assert False, "Cycle in dependencies was not detected"
except ConfigException, _:
assert True
def test12():
class TC12(ConfigModel):
t1 = NullTask("t1", path="t1")
t2 = NullTask("t2", path="t2")
t3 = NullTask("t3", path="t3")
with_dependencies(TaskGroup(t1, t2) | t3)
assert make_dep_tuple_set(TC12) == set([("t1", "t3"), ("t2", "t3")])
def test13():
class TC13(ConfigModel):
t1 = NullTask("t1", path="t1")
t2 = NullTask("t2", path="t2")
t3 = NullTask("t3", path="t3")
t4 = NullTask("t4", path="t4")
with_dependencies(TaskGroup(t1, t2 | t3) | t4)
assert make_dep_tuple_set(TC13) == set([("t2", "t3"), ("t1", "t4"), ("t3", "t4")])
def test14():
class TC14(ConfigModel):
t1 = NullTask("t1", path="t1")
t2 = NullTask("t2", path="t2")
t3 = NullTask("t3", path="t3")
t4 = NullTask("t4", path="t4")
with_dependencies(TaskGroup(t1, t2) | TaskGroup(t3, t4))
assert make_dep_tuple_set(TC14) == set([("t2", "t3"), ("t1", "t4"),
("t1", "t3"), ("t2", "t4")])
def test15():
class TC15(ConfigModel):
t1 = NullTask("t1", path="t1")
t2 = NullTask("t2", path="t2")
t3 = NullTask("t3", path="t3")
t4 = NullTask("t4", path="t4")
with_dependencies(TaskGroup(t1 | t2, t3 | t4))
assert make_dep_tuple_set(TC15) == set([("t1", "t2"), ("t3", "t4")])
def test16():
class TC16(ConfigModel):
t1 = NullTask("t1", path="t1")
t2 = NullTask("t2", path="t2")
t3 = NullTask("t3", path="t3")
with_dependencies(t1 | TaskGroup(t2, t3))
assert make_dep_tuple_set(TC16) == set([("t1", "t3"), ("t1", "t2")])
def test17():
class TC17(ConfigModel):
t1 = NullTask("t1", path="t1")
t2 = NullTask("t2", path="t2")
t3 = NullTask("t3", path="t3")
t4 = NullTask("t4", path="t4")
t5 = NullTask("t5", path="t5")
t6 = NullTask("t6", path="t6")
t7 = NullTask("t7", path="t7")
t8 = NullTask("t8", path="t8")
t9 = NullTask("t9", path="t9")
t0 = NullTask("t0", path="t0")
with_dependencies(TaskGroup(t1 | t2, TaskGroup(t3, t4)) | t5 |
TaskGroup(TaskGroup(t6, t7, t8), t9 | t0))
assert make_dep_tuple_set(TC17) == set([("t1", "t2"), ("t2", "t5"),
("t3", "t5"), ("t4", "t5"),
("t5", "t6"), ("t5", "t7"),
("t5", "t8"), ("t5", "t9"),
("t9", "t0")])
def test18():
class TC18(ConfigModel):
t1 = NullTask("t1", path="t1")
t2 = NullTask("t2", path="t2")
t3 = NullTask("t3", path="t3")
with_dependencies(TaskGroup(t1, TaskGroup(t2, TaskGroup(t3))))
assert make_dep_tuple_set(TC18) == set()
def test19():
class TC19(ConfigModel):
t1 = NullTask("t1", path="t1")
t2 = NullTask("t2", path="t2")
t3 = NullTask("t3", path="t3")
with_dependencies(t1 | t2)
with_dependencies(t2 | t3)
assert make_dep_tuple_set(TC19) == set([("t1", "t2"), ("t2", "t3")])
def test20():
class TC20(ConfigModel):
t1 = NullTask("t1", path="t1")
t2 = NullTask("t2", path="t2")
t3 = NullTask("t3", path="t3")
t4 = NullTask("t4", path="t4")
t5 = NullTask("t5", path="t5")
t6 = NullTask("t6", path="t6")
t7 = NullTask("t7", path="t7")
t8 = NullTask("t8", path="t8")
t9 = NullTask("t9", path="t9")
t0 = NullTask("t0", path="t0")
with_dependencies(TaskGroup(t1 | t2, TaskGroup(t3, t4)) | t5)
with_dependencies(t5 | TaskGroup(TaskGroup(t6, t7, t8), t9 | t0))
assert make_dep_tuple_set(TC20) == set([("t1", "t2"), ("t2", "t5"),
("t3", "t5"), ("t4", "t5"),
("t5", "t6"), ("t5", "t7"),
("t5", "t8"), ("t5", "t9"),
("t9", "t0")])
def test21():
class TC21(ConfigModel):
t1 = NullTask("t1", path="t1")
t2 = NullTask("t2", path="t2")
t3 = NullTask("t3", path="t3")
with_dependencies(t1 | t2)
with_dependencies(t2 | t3)
with_dependencies(t1 | t2)
assert make_dep_tuple_set(TC21) == set([("t1", "t2"), ("t2", "t3")])
def test22():
class First(ConfigModel):
t1 = NullTask("t1", path="t1")
t2 = NullTask("t2", path="t2")
t3 = NullTask("t3", path="t3")
with_dependencies(t1 | t3, t2 | t3)
class Second(ConfigModel):
t1 = NullTask("t1", path="t1")
t2 = NullTask("t2", path="t2")
t3 = NullTask("t3", path="t3")
with_dependencies(TaskGroup(t1, t2) | t3)
assert make_dep_tuple_set(First) == make_dep_tuple_set(Second)
def test23():
class First(ConfigModel):
t1 = NullTask("t1", path="t1")
t2 = NullTask("t2", path="t2")
with_dependencies(TaskGroup(t1, t1 | t2))
class Second(ConfigModel):
t1 = NullTask("t1", path="t1")
t2 = NullTask("t2", path="t2")
with_dependencies(t1 | t2)
assert make_dep_tuple_set(First) == make_dep_tuple_set(Second)
def test24():
class First(ConfigModel):
t1 = NullTask("t1", path="t1")
t2 = NullTask("t2", path="t2")
t3 = NullTask("t3", path="t3")
with_dependencies(TaskGroup(t1, t2, t3), t1 | t3)
class Second(ConfigModel):
t1 = NullTask("t1", path="t1")
t2 = NullTask("t2", path="t2")
t3 = NullTask("t3", path="t3")
with_dependencies(TaskGroup(t1 | t3, t2))
assert make_dep_tuple_set(First) == make_dep_tuple_set(Second)
def test25():
class First(ConfigModel):
t1 = NullTask("t1", path="t1")
t2 = NullTask("t2", path="t2")
t3 = NullTask("t3", path="t3")
with_dependencies(t1 | t2 | t3, t1 | TaskGroup(t2, t3))
class Second(ConfigModel):
t1 = NullTask("t1", path="t1")
t2 = NullTask("t2", path="t2")
t3 = NullTask("t3", path="t3")
with_dependencies(t1 | t2 | t3, t1 | t3)
assert make_dep_tuple_set(First) == make_dep_tuple_set(Second)
def test26():
TG = TaskGroup
class First(ConfigModel):
t1 = NullTask("t1", path="t1")
t2 = NullTask("t2", path="t2")
t3 = NullTask("t3", path="t3")
t4 = NullTask("t4", path="t4")
t5 = NullTask("t5", path="t5")
with_dependencies(TG(TG(t1, t2, t3), t4 | t5),
t2 | t4,
t3 | t5)
class Second(ConfigModel):
t1 = NullTask("t1", path="t1")
t2 = NullTask("t2", path="t2")
t3 = NullTask("t3", path="t3")
t4 = NullTask("t4", path="t4")
t5 = NullTask("t5", path="t5")
with_dependencies(t2 | t4 | t5,
t3 | t5)
assert make_dep_tuple_set(First) == make_dep_tuple_set(Second)
#tests after this point use these helper classes: Capture records
#(role_name, task_name) pairs in the order tasks are performed so ordering
#can be asserted, and ReportingTask is a config task that reports its role
#and name through a Capture instance when performed
class Capture(object):
def __init__(self):
self.performed = []
def __call__(self, name, task):
self.performed.append((name, task))
def pos(self, name, task):
return self.performed.index((name, task))
class ReportingTask(_ConfigTask, StructuralTask):
def __init__(self, name, target=None, report=lambda n, o: (n, o), **kwargs):
super(ReportingTask, self).__init__(name, task_role=target, **kwargs)
self.target = target
self.report = report
def get_init_args(self):
args, kwargs = super(ReportingTask, self).get_init_args()
try:
kwargs.pop("task_role")
except Exception, _:
pass
kwargs["target"] = self.target
kwargs["report"] = self.report
return args, kwargs
def perform(self):
comp = self.get_task_role()
if not isinstance(comp, basestring):
if isinstance(comp.name, basestring):
comp = comp.name
else:
comp = comp.name.value()
self.report(comp, self.name)
class BogusServerRef(IPAddressable):
def get_ip(self):
return "8.8.8.8"
admin_ip = property(get_ip)
def test27():
cap = Capture()
class PingNamespace(NamespaceModel):
ping_target = Role("ping_target", host_ref=BogusServerRef())
ns = PingNamespace()
class PingConfig(ConfigModel):
ping_task = ReportingTask("ping", target=PingNamespace.ping_target, report=cap)
cfg = PingConfig()
ea = ExecutionAgent(config_model_instance=cfg, namespace_model_instance=ns,
no_delay=True)
ea.perform_config()
assert cap.performed
def test28():
cap = Capture()
class PingNamespace(NamespaceModel):
ping_target = Role("ping_target", host_ref=BogusServerRef())
ns = PingNamespace()
class PingConfig(ConfigModel):
t3 = ReportingTask("t3", target=PingNamespace.ping_target, report=cap, repeat_count=1)
t2 = ReportingTask("t2", target=PingNamespace.ping_target, report=cap, repeat_count=1)
t1 = ReportingTask("t1", target=PingNamespace.ping_target, report=cap, repeat_count=1)
with_dependencies(t1 | t2 | t3)
cfg = PingConfig()
ea = ExecutionAgent(config_model_instance=cfg, namespace_model_instance=ns,
no_delay=True)
ea.perform_config()
assert (cap.pos("ping_target", PingConfig.t1.name) <
cap.pos("ping_target", PingConfig.t2.name) <
cap.pos("ping_target", PingConfig.t3.name) )
def test29():
cap = Capture()
class PingNamespace(NamespaceModel):
ping_target = Role("ping_target", host_ref=BogusServerRef())
ns = PingNamespace()
class PingConfig(ConfigModel):
t3 = ReportingTask("t3", target=PingNamespace.ping_target, report=cap)
t2 = ReportingTask("t2", target=PingNamespace.ping_target, report=cap)
t1 = ReportingTask("t1", target=PingNamespace.ping_target, report=cap)
t4 = ReportingTask("t4", target=PingNamespace.ping_target, report=cap)
t5 = ReportingTask("t5", target=PingNamespace.ping_target, report=cap)
with_dependencies(t1 | t2 | t3,
t4 | t2,
t4 | t3,
t5 | t3)
cfg = PingConfig()
ea = ExecutionAgent(config_model_instance=cfg, namespace_model_instance=ns,
no_delay=True)
ea.perform_config()
assert (cap.pos("ping_target", PingConfig.t1.name) <
cap.pos("ping_target", PingConfig.t2.name) <
cap.pos("ping_target", PingConfig.t3.name) and
cap.performed[-1] == ("ping_target", PingConfig.t3.name) and
cap.pos("ping_target", PingConfig.t4.name) <
cap.pos("ping_target", PingConfig.t2.name))
def test30():
cap = Capture()
class ElasticNamespace(NamespaceModel):
ping_targets = MultiRole(Role("ping-target", host_ref=BogusServerRef()))
pong_targets = MultiRole(Role("pong-target", host_ref=BogusServerRef()))
ns = ElasticNamespace()
class ElasticConfig(ConfigModel):
ping = ReportingTask("ping", target=ElasticNamespace.ping_targets, report=cap)
pong = ReportingTask("pong", target=ElasticNamespace.pong_targets, report=cap)
with_dependencies(ping | pong)
for i in range(5):
_ = ns.ping_targets[i]
cfg = ElasticConfig()
ea = ExecutionAgent(config_model_instance=cfg, namespace_model_instance=ns,
no_delay=True)
ea.perform_config()
assert (len(ns.ping_targets) == 5 and
(set(["0", "1", "2", "3", "4"]) == set(ns.ping_targets.keys())) and
len(ns.pong_targets) == 0)
def test31():
class VarCapture(_ConfigTask):
def __init__(self, name, task_role, **kwargs):
super(VarCapture, self).__init__(name, task_role=task_role, **kwargs)
self.vars = {}
def perform(self):
vv = self._model_instance.namespace_model_instance.comp.get_visible_vars()
self.vars.update({v.name:v.get_value(self.get_task_role())
for v in vv.values()})
class SimpleNS(NamespaceModel):
with_variables(Var("ID", "wrong"),
Var("ONE", "1"),
Var("TWO", "2"),
Var("THREE", "3"))
comp = Role("test-comp", host_ref="!{ID}").add_variable(Var("ID", "right!"),
Var("THREE", "drei"))
class SimpleCfg(ConfigModel):
comp_task = VarCapture("varcap", SimpleNS.comp)
ns = SimpleNS()
cfg = SimpleCfg()
ea = ExecutionAgent(config_model_instance=cfg, namespace_model_instance=ns,
no_delay=True)
ea.perform_config()
assert (cfg.comp_task.vars["ID"] == "right!" and
cfg.comp_task.vars["THREE"] == "drei" and
cfg.comp_task.vars["ONE"] == "1" and
cfg.comp_task.vars["TWO"] == "2")
def test32():
class VarCapture(_ConfigTask):
def __init__(self, name, task_role, **kwargs):
super(VarCapture, self).__init__(name, task_role=task_role, **kwargs)
self.vars = {}
def perform(self):
vv = self._model_instance.namespace_model_instance.get_visible_vars()
self.vars.update({v.name:v.get_value(self.get_task_role())
for v in vv.values()})
class SimpleNS(NamespaceModel):
with_variables(Var("ID", "wrong"),
Var("ONE", "1"),
Var("TWO", "2"),
Var("THREE", "3"))
comp = Role("test-comp", host_ref="!{ID}").add_variable(Var("ID", "right!"),
Var("THREE", "drei"))
class SimpleCfg(ConfigModel):
comp_task = VarCapture("varcap", SimpleNS.comp)
ns = SimpleNS()
cfg = SimpleCfg()
ea = ExecutionAgent(config_model_instance=cfg, namespace_model_instance=ns,
no_delay=True)
ea.perform_config()
assert (cfg.comp_task.vars["ID"] == "wrong" and
cfg.comp_task.vars["THREE"] == "3" and
cfg.comp_task.vars["ONE"] == "1" and
cfg.comp_task.vars["TWO"] == "2")
def test33():
class First(ConfigModel):
t1 = NullTask("t1", path="t1")
t2 = NullTask("t2", path="t2")
t3 = NullTask("t3", path="t3")
with_dependencies(t1 | t2 | t3, t1 | t2 & t3)
class Second(ConfigModel):
t1 = NullTask("t1", path="t1")
t2 = NullTask("t2", path="t2")
t3 = NullTask("t3", path="t3")
with_dependencies(t1 | t2 | t3, t1 | t3)
assert make_dep_tuple_set(First) == make_dep_tuple_set(Second)
def test34():
class First(ConfigModel):
t1 = NullTask("t1", path="t1")
t2 = NullTask("t2", path="t2")
t3 = NullTask("t3", path="t3")
with_dependencies(t1 | t2 | t3, t1 | (t2 & t3))
class Second(ConfigModel):
t1 = NullTask("t1", path="t1")
t2 = NullTask("t2", path="t2")
t3 = NullTask("t3", path="t3")
with_dependencies(t1 | t2 | t3, t1 | t3)
assert make_dep_tuple_set(First) == make_dep_tuple_set(Second)
def test35():
    #this is a re-statement of test26 using '&' instead of
    #TaskGroup (TG). It's a pretty literal translation,
    #although algebraically one set of parens isn't needed.
TG = TaskGroup
class First(ConfigModel):
t1 = NullTask("t1", path="t1")
t2 = NullTask("t2", path="t2")
t3 = NullTask("t3", path="t3")
t4 = NullTask("t4", path="t4")
t5 = NullTask("t5", path="t5")
with_dependencies((t1 & t2 & t3) & (t4 | t5),
t2 | t4,
t3 | t5)
class Second(ConfigModel):
t1 = NullTask("t1", path="t1")
t2 = NullTask("t2", path="t2")
t3 = NullTask("t3", path="t3")
t4 = NullTask("t4", path="t4")
t5 = NullTask("t5", path="t5")
with_dependencies(t2 | t4 | t5,
t3 | t5)
assert make_dep_tuple_set(First) == make_dep_tuple_set(Second)
def test36():
class NS(NamespaceModel):
grid = MultiRole(Role("grid", host_ref="127.0.0.1"))
ns = NS()
class Cfg(ConfigModel):
grid_prep = MultiTask("grid_prep", NullTask("gp", path="gp"), NS.grid)
cfg = Cfg()
for i in range(5):
_ = ns.grid[i]
cfg.set_namespace(ns)
cfg.grid_prep.fix_arguments()
assert len(cfg.grid_prep.instances) == 5
def test37():
class NS(NamespaceModel):
grid = MultiRole(Role("grid", host_ref="127.0.0.1"))
ns = NS()
class Cfg(ConfigModel):
grid_prep = MultiTask("grid_prep", NullTask("gp", path="gp"), NS.grid)
cfg = Cfg()
_ = ns.grid[0]
cfg.set_namespace(ns)
cfg.grid_prep.fix_arguments()
assert (len(cfg.grid_prep.instances) == 1 and
cfg.grid_prep.instances.value()[0].name == "gp-grid_0")
def test38():
class NS(NamespaceModel):
grid = MultiRole(Role("grid", host_ref="127.0.0.1"))
ns = NS()
class Cfg(ConfigModel):
grid_prep = MultiTask("grid_prep", NullTask("gp", path="gp"), NS.grid)
cfg = Cfg()
_ = ns.grid[0]
cfg.set_namespace(ns)
cfg.grid_prep.fix_arguments()
assert (len(cfg.grid_prep.instances) == 1 and
cfg.grid_prep.instances.value()[0].name == "gp-grid_0")
def test39():
cap = Capture()
class NS(NamespaceModel):
grid = MultiRole(Role("grid", host_ref="127.0.0.1"))
ns = NS()
class Cfg(ConfigModel):
grid_prep = MultiTask("grid_prep", ReportingTask("rt", report=cap),
NS.grid)
cfg = Cfg()
for i in range(5):
_ = ns.grid[i]
ea = ExecutionAgent(config_model_instance=cfg, namespace_model_instance=ns,
no_delay=True)
ea.perform_config()
assert len(cfg.grid_prep.instances) == 5 and len(cap.performed) == 5
def test40():
cap = Capture()
class NS(NamespaceModel):
grid = MultiRole(Role("grid", host_ref="127.0.0.1"))
static = Role("static", host_ref="127.0.0.1")
ns = NS()
class Cfg(ConfigModel):
grid_prep = MultiTask("grid_prep", ReportingTask("rt", report=cap),
NS.grid)
before = ReportingTask("before", target=NS.static, report=cap)
after = ReportingTask("after", target=NS.static, report=cap)
with_dependencies(before | grid_prep | after)
cfg = Cfg()
for i in range(3):
_ = ns.grid[i]
ea = ExecutionAgent(config_model_instance=cfg, namespace_model_instance=ns,
no_delay=True)
ea.perform_config()
assert (len(cfg.grid_prep.instances) == 3 and
len(cap.performed) == 5 and
(cap.pos("static", "before") < cap.pos("grid_0", "rt-grid_0") and
cap.pos("static", "before") < cap.pos("grid_1", "rt-grid_1") and
cap.pos("static", "before") < cap.pos("grid_2", "rt-grid_2") and
cap.pos("static", "after") > cap.pos("grid_0", "rt-grid_0") and
cap.pos("static", "after") > cap.pos("grid_1", "rt-grid_1") and
cap.pos("static", "after") > cap.pos("grid_2", "rt-grid_2")))
def test41():
cap = Capture()
class NS(NamespaceModel):
grid = MultiRole(Role("grid", host_ref="127.0.0.1"))
ns = NS()
class Cfg(ConfigModel):
grid_prep = MultiTask("grid_prep", ReportingTask("rt", report=cap),
NS.q.grid)
cfg = Cfg()
for i in range(5):
_ = ns.grid[i]
ea = ExecutionAgent(config_model_instance=cfg, namespace_model_instance=ns,
no_delay=True)
ea.perform_config()
assert len(cfg.grid_prep.instances) == 5 and len(cap.performed) == 5
def test42():
cap = Capture()
class NS(NamespaceModel):
grid1 = MultiRole(Role("grid1", host_ref="127.0.0.1"))
grid2 = MultiRole(Role("grid2", host_ref="127.0.0.1"))
ns = NS()
class Cfg(ConfigModel):
grid_prep = MultiTask("grid_prep", ReportingTask("rt", report=cap),
NS.q.union(NS.q.grid1, NS.q.grid2))
cfg = Cfg()
for i in range(5):
_ = ns.grid1[i]
for i in range(3):
_ = ns.grid2[i]
ea = ExecutionAgent(config_model_instance=cfg, namespace_model_instance=ns,
no_delay=True)
ea.perform_config()
assert len(cfg.grid_prep.instances) == 8 and len(cap.performed) == 8
def test43():
"""
test43: set a default task performance host using the 'default_task_role'
kwarg of with_config_options(), and then create a task with no task_role.
    Create an instance of the config, and see that get_task_host() on the
    config's task returns the role's get_ip address.
"""
cap = Capture()
class NS(NamespaceModel):
task_performer = Role("tp", host_ref="127.0.0.1")
ns = NS()
class Cfg(ConfigModel):
with_config_options(default_task_role=NS.task_performer)
a_task = ReportingTask("atask", report=cap)
cfg = Cfg()
cfg.set_namespace(ns)
assert cfg.a_task.get_task_host() == "127.0.0.1"
def test44():
"""
test44: like test43, but get the task host from a StaticServer in the
infra model
"""
cap = Capture()
class Infra(InfraModel):
setup_server = StaticServer("setup_helper", "127.0.0.1")
infra = Infra("helper")
class NS(NamespaceModel):
task_performer = Role("tp", host_ref=Infra.setup_server)
ns = NS()
ns.set_infra_model(infra)
class Cfg(ConfigModel):
with_config_options(default_task_role=NS.task_performer)
a_task = ReportingTask("atask", report=cap)
cfg = Cfg()
cfg.set_namespace(ns)
assert cfg.a_task.get_task_host() == "127.0.0.1"
def test44a():
"""
test44a: like test44, setting the role on the task instead of getting
it via the default for the config model
"""
cap = Capture()
class Infra(InfraModel):
setup_server = StaticServer("setup_helper", "127.0.0.1")
infra = Infra("helper")
infra.setup_server.fix_arguments()
class NS(NamespaceModel):
task_performer = Role("tp", host_ref=Infra.setup_server)
ns = NS()
ns.set_infra_model(infra)
ns.task_performer.fix_arguments()
class Cfg(ConfigModel):
a_task = ReportingTask("atask", report=cap, target=NS.task_performer)
cfg = Cfg()
cfg.set_namespace(ns)
cfg.a_task.fix_arguments()
assert cfg.a_task.get_task_host() == "127.0.0.1"
def test45():
"""
    test45: check that config tasks can be driven from a nested config class
"""
class Infra(InfraModel):
setup_server = StaticServer("setup_helper", "127.0.0.1")
infra = Infra("helper")
infra.setup_server.fix_arguments()
class NS(NamespaceModel):
task_performer = Role("tp", host_ref=Infra.setup_server)
ns = NS()
ns.set_infra_model(infra)
ns.task_performer.fix_arguments()
cap = Capture()
class InnerCfg(ConfigModel):
task = ReportingTask("inner_task", report=cap)
class OuterCfg(ConfigModel):
wrapped_task = ConfigClassTask("wrapper", InnerCfg, task_role=NS.task_performer)
cfg = OuterCfg()
cfg.set_namespace(ns)
ea = ExecutionAgent(config_model_instance=cfg, namespace_model_instance=ns,
infra_model_instance=infra, no_delay=True)
try:
ea.perform_config()
except ExecutionException, e:
import traceback
for task, etype, value, tb in ea.get_aborted_tasks():
print ">>>Task {} failed with the following:".format(task.name)
traceback.print_exception(etype, value, tb)
print
assert False, e.message
assert len(cap.performed) == 1
def test46():
"""
test46: wrap a config class with a sequence of tasks in ConfigClassTask
wrapper and ensure they all get performed in order
"""
class Infra(InfraModel):
setup_server = StaticServer("setup_helper", "127.0.0.1")
infra = Infra("helper")
class NS(NamespaceModel):
task_performer = Role("tp", host_ref=Infra.setup_server)
ns = NS()
ns.set_infra_model(infra)
cap = Capture()
class InnerCfg(ConfigModel):
t1 = ReportingTask("inner_task1", report=cap)
t2 = ReportingTask("inner_task2", report=cap)
t3 = ReportingTask("inner_task3", report=cap)
with_dependencies(t1 | t2 | t3)
class OuterCfg(ConfigModel):
wrapped_task = ConfigClassTask("wrapper", InnerCfg, task_role=NS.task_performer)
cfg = OuterCfg()
cfg.set_namespace(ns)
ea = ExecutionAgent(config_model_instance=cfg, namespace_model_instance=ns,
infra_model_instance=infra, no_delay=True)
try:
ea.perform_config()
except ExecutionException, e:
import traceback
for task, etype, value, tb in ea.get_aborted_tasks():
print ">>>Task {} failed with the following:".format(task.name)
traceback.print_exception(etype, value, tb)
print
assert False, e.message
assert (len(cap.performed) == 3 and
cap.pos("tp", "inner_task1") < cap.pos("tp", "inner_task2") and
cap.pos("tp", "inner_task2") < cap.pos("tp", "inner_task3"))
def test47():
"""
test47: wrap a config class with a sequence of tasks in ConfigClassTask
wrapper, then drive the creation of instances of the ConfigClassTask
with a MultiTask wrapper.
"""
class IPGen(object):
def __init__(self):
self.host_part = 0
def __call__(self, context):
self.host_part += 1
return "127.0.0.{}".format(self.host_part)
ipgen = IPGen()
class Infra(InfraModel):
setup_server = MultiResource(StaticServer("setup_helper", ipgen))
infra = Infra("helper")
class NS(NamespaceModel):
task_role = MultiRole(Role("tp",
host_ref=ctxt.model.infra.setup_server[ctxt.name]))
ns = NS()
ns.set_infra_model(infra)
for i in range(3):
_ = ns.task_role[i]
cap = Capture()
class InnerCfg(ConfigModel):
t1 = ReportingTask("inner_task1", report=cap)
t2 = ReportingTask("inner_task2", report=cap)
t3 = ReportingTask("inner_task3", report=cap)
with_dependencies(t1 | t2 | t3)
class OuterCfg(ConfigModel):
wrapped_task = MultiTask("setupSuite",
ConfigClassTask("wrapper", InnerCfg),
NS.q.task_role.all())
cfg = OuterCfg()
cfg.set_namespace(ns)
ea = ExecutionAgent(config_model_instance=cfg, namespace_model_instance=ns,
infra_model_instance=infra, no_delay=True)
try:
ea.perform_config()
except ExecutionException, e:
import traceback
for task, etype, value, tb in ea.get_aborted_tasks():
print ">>>Task {} failed with the following:".format(task.name)
traceback.print_exception(etype, value, tb)
print
assert False, e.message
assert len(cap.performed) == 9
def test48():
"""
test48: wrap a config class with a sequence of tasks in ConfigClassTask
    wrapper and ensure they all get performed in order, then set up a final
    task in the outer config class and ensure that it is performed last
"""
class Infra(InfraModel):
setup_server = StaticServer("setup_helper", "127.0.0.1")
infra = Infra("helper")
class NS(NamespaceModel):
task_performer = Role("tp", host_ref=Infra.setup_server)
default = Role("default", host_ref="127.0.1.1")
ns = NS()
ns.set_infra_model(infra)
cap = Capture()
class InnerCfg(ConfigModel):
t1 = ReportingTask("inner_task1", report=cap)
t2 = ReportingTask("inner_task2", report=cap)
t3 = ReportingTask("inner_task3", report=cap)
with_dependencies(t1 | t2 | t3)
class OuterCfg(ConfigModel):
wrapped_task = ConfigClassTask("wrapper", InnerCfg, task_role=NS.task_performer)
final = ReportingTask("final", target=NS.default, report=cap)
with_dependencies(wrapped_task | final)
cfg = OuterCfg()
cfg.set_namespace(ns)
ea = ExecutionAgent(config_model_instance=cfg, namespace_model_instance=ns,
infra_model_instance=infra, no_delay=True)
try:
ea.perform_config()
except ExecutionException, e:
import traceback
for task, etype, value, tb in ea.get_aborted_tasks():
print ">>>Task {} failed with the following:".format(task.name)
traceback.print_exception(etype, value, tb)
print
assert False, e.message
assert (len(cap.performed) == 4 and
cap.pos("tp", "inner_task1") < cap.pos("tp", "inner_task2") and
cap.pos("tp", "inner_task2") < cap.pos("tp", "inner_task3") and
cap.pos("tp", "inner_task3") < cap.pos("default", "final"))
def test49():
"""
test49: wrap a config class with a sequence of tasks in ConfigClassTask
wrapper and ensure they all get performed in order, and then set up
initial and final tasks in the outer config and make sure everything is
happening in the right order
"""
class Infra(InfraModel):
setup_server = StaticServer("setup_helper", "127.0.0.1")
infra = Infra("helper")
class NS(NamespaceModel):
task_performer = Role("tp", host_ref=Infra.setup_server)
default = Role("default", host_ref="127.0.1.1")
ns = NS()
ns.set_infra_model(infra)
cap = Capture()
class InnerCfg(ConfigModel):
t1 = ReportingTask("inner_task1", report=cap)
t2 = ReportingTask("inner_task2", report=cap)
t3 = ReportingTask("inner_task3", report=cap)
with_dependencies(t1 | t2 | t3)
class OuterCfg(ConfigModel):
wrapped_task = ConfigClassTask("wrapper", InnerCfg, task_role=NS.task_performer)
initial = ReportingTask("initial", target=NS.default, report=cap)
final = ReportingTask("final", target=NS.default, report=cap)
with_dependencies(initial | wrapped_task | final)
cfg = OuterCfg()
cfg.set_namespace(ns)
ea = ExecutionAgent(config_model_instance=cfg, namespace_model_instance=ns,
infra_model_instance=infra, no_delay=True)
try:
ea.perform_config()
except ExecutionException, e:
import traceback
for task, etype, value, tb in ea.get_aborted_tasks():
print ">>>Task {} failed with the following:".format(task.name)
traceback.print_exception(etype, value, tb)
print
assert False, e.message
assert (len(cap.performed) == 5 and
cap.pos("tp", "inner_task1") < cap.pos("tp", "inner_task2") and
cap.pos("tp", "inner_task2") < cap.pos("tp", "inner_task3") and
cap.pos("tp", "inner_task3") < cap.pos("default", "final") and
cap.pos("default", "initial") < cap.pos("tp", "inner_task1"))
def test50():
"""
    test50: wrap a config class with a sequence of tasks in ConfigClassTask
    wrapper, drive the creation of instances of the ConfigClassTask with a
    MultiTask wrapper, and add initial and final tasks in the outer config to
    check that they bracket all the generated inner tasks.
"""
class IPGen(object):
def __init__(self):
self.host_part = 0
def __call__(self, context):
self.host_part += 1
return "127.0.0.{}".format(self.host_part)
ipgen = IPGen()
class Infra(InfraModel):
setup_server = MultiResource(StaticServer("setup_helper", ipgen))
infra = Infra("helper")
class NS(NamespaceModel):
task_role = MultiRole(Role("tp",
host_ref=ctxt.model.infra.setup_server[ctxt.name]))
default = Role("default", "127.0.1.1")
ns = NS()
ns.set_infra_model(infra)
for i in range(3):
_ = ns.task_role[i]
cap = Capture()
class InnerCfg(ConfigModel):
t1 = ReportingTask("inner_task1", report=cap)
t2 = ReportingTask("inner_task2", report=cap)
t3 = ReportingTask("inner_task3", report=cap)
with_dependencies(t1 | t2 | t3)
class OuterCfg(ConfigModel):
wrapped_task = MultiTask("setupSuite",
ConfigClassTask("wrapper", InnerCfg),
NS.q.task_role.all())
initial = ReportingTask("initial", target=NS.default, report=cap)
final = ReportingTask("final", target=NS.default, report=cap)
with_dependencies(initial | wrapped_task | final)
cfg = OuterCfg()
cfg.set_namespace(ns)
ea = ExecutionAgent(config_model_instance=cfg, namespace_model_instance=ns,
infra_model_instance=infra, no_delay=True)
try:
ea.perform_config()
except ExecutionException, e:
import traceback
for task, etype, value, tb in ea.get_aborted_tasks():
print ">>>Task {} failed with the following:".format(task.name)
traceback.print_exception(etype, value, tb)
print
assert False, e.message
assert (len(cap.performed) == 11 and
cap.pos("default", "final") == len(cap.performed) -1 and
cap.pos("default", "initial") == 0)
def test51():
class SkipemNS(NamespaceModel):
with_variables(Var("ONE", "1"),
Var("TWO", "2"),
Var("THREE", "!{ONE}+!{TWO}", in_env=False))
r = Role("me", host_ref="127.0.0.1")
ns = SkipemNS()
class SkipConfig(ConfigModel):
t = NullTask("env-test", task_role=SkipemNS.r)
cfg = SkipConfig()
cfg.set_namespace(ns)
assert "THREE" in cfg.t.task_variables() and "THREE" not in cfg.t.task_variables(for_env=True)
def do_all():
setup()
for k, v in globals().items():
if k.startswith("test") and callable(v):
v()
if __name__ == "__main__":
do_all()
| mit | 2,926,311,277,934,066,700 | 34.251545 | 103 | 0.555133 | false |
rfinn/LCS | paper1code/LCSmeasuresnr24NSA.py | 1 | 6768 | #!/usr/bin/env python
"""
"""
from pylab import *
import os, pyfits,ds9
from LCSReadmasterBaseNSA import *
from scipy.interpolate import interp1d
import urllib, atpy
from pyraf import iraf
iraf.stsdas()
iraf.analysis()
iraf.isophote()
iraf.tables()
iraf.ttools()
iraf.digiphot()
#iraf.apphot()
iraf.daophot()
min_cutout_size=100. # in arcsec
mypath=os.getcwd()
if mypath.find('Users') > -1:
print "Running on Rose's mac pro"
homedir='/Users/rfinn/'
elif mypath.find('home') > -1:
print "Running on coma"
homedir='/home/rfinn/'
#getimages=1
class cluster(baseClusterNSA):
def __init__(self,clustername):
baseClusterNSA.__init__(self,clustername)
def measuresnr24(self):
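        # This method measures 24um signal-to-noise for every galaxy flagged
        # as lying on the MIPS image:
        #   1. translate each RA/Dec into pixel coordinates on the 24um
        #      mosaic with iraf.imcoords.wcsctran
        #   2. run iraf.daophot.phot with a single fixed aperture (2.6,
        #      written to the 'apertures' file) and a zero-valued sky file on
        #      both the mosaic and its noise map
        #   3. parse the two phot tables into flux (counts) and uncertainty
        #      (unccounts) arrays, form snr24 = counts / unccounts, and write
        #      f24NSA, f24NSAerr and snr24 to the cluster's SNR24 output file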
working_dir=homedir+'research/LocalClusters/MIPS_SNR/'
s='mkdir -p '+working_dir
os.system(s)
os.chdir(working_dir)
self.mosaic24=homedir+'research/LocalClusters/Images/'+self.prefix+'/24umWCS/'+self.prefix+'-WCS-mosaic_minus_median_extract.fits'
#self.mosaic24unc=homedir+'research/LocalClusters/Images/'+self.prefix+'/24umWCS/'+self.prefix+'-WCS-mosaic_std.fits'
self.mosaic24unc=homedir+'research/LocalClusters/MIPS/rawdata/'+self.prefix+'/FullMosaic/mosaic_noise.fits'
working_dir=os.getcwd()
ra=self.ra[self.On24ImageFlag]
dec=self.dec[self.On24ImageFlag]
outfile=open(working_dir+'/'+self.prefix+'_incoords','w')
for i in range(len(ra)):
outfile.write('%12.8f %12.8f \n'%(ra[i],dec[i]))
outfile.close()
output_coords=working_dir+'/'+self.prefix+'_outcoords'
if os.path.exists(output_coords):
os.remove(output_coords)
input_coords=working_dir+'/'+self.prefix+'_incoords'
print input_coords,os.getcwd()
iraf.imcoords.wcsctran(image=self.mosaic24,input=input_coords,output=output_coords,inwcs='world',outwcs='logical',verbose='no')
# run qphot
#iraf.apphot.qphot(image=self.mosaic24,cbox=2,coords=output_coords,apertures='2.0, 3.0, 4.0, 5.0, 6.0' ,interactive='no',annulus=10, dannulus=5)
skyfile=working_dir+'/'+self.prefix+"_sky"
sky = open(skyfile,'w')
for i in range(len(ra)):
sky.write("0.0 \n")
sky.close()
aps = open("apertures",'w')
#aps.write("1,1.5,2,2.6,3,3.5,4,4.5,5,5.5")
aps.write("2.6")
aps.close()
#runiraf()
datfile=self.prefix+"_phot.dat"
if os.path.exists(datfile):
os.remove(datfile)
iraf.digiphot.daophot.phot(image=self.mosaic24,coords=output_coords,output=datfile,calgorithm='none',skyfile=skyfile,salgori="file",aperture="apertures",interactive="no",verify='no',verbose='no')
input=open(datfile,'r')
aperture=zeros(len(ra),'f')
counts=zeros(len(ra),'f')
area=zeros(len(ra),'f')
j=0
i=0
for line in input:
if line.find('#') > -1: #skip lines with '#' in them
continue
if line.find('mosaic') > -1: #skip lines with '#' in them
j=0
continue
j=j+1
if (j > 3):
#print j, line
t = line.split()
aperture[i]=float(t[0])
try:
counts[i]=float(t[1])
except ValueError:
counts[i]=0
try:
area[i]=float(t[2])
except ValueError:
area[i]=0
i += 1
input.close()
datfile=self.prefix+"_phot_unc.dat"
if os.path.exists(datfile):
os.remove(datfile)
iraf.digiphot.daophot.phot(image=self.mosaic24unc,coords=output_coords,output=datfile,calgorithm='none',skyfile=skyfile,salgori="file",aperture="apertures",interactive="no",verify='no',verbose='no')
##iraf.digiphot.apphot.phot(image,coords="noisecoords.dat",output="noise.dat",calgorithm='none',skyfile="sky",salgori="file",aperture="apertures",interactive="no",verbose='yes')
input=open(datfile,'r')
uncaperture=zeros(len(ra),'f')
unccounts=zeros(len(ra),'f')
uncarea=zeros(len(ra),'f')
dataflag=ones(len(ra),'i')
j=0
i=0
for line in input:
if line.find('#') > -1: #skip lines with '#' in them
continue
if line.find('mosaic') > -1: #skip lines with '#' in them
j=0
continue
j=j+1
if (j > 3):
print j, line
t = line.split()
uncaperture[i]=float(t[0])
try:
unccounts[i]=float(t[1])
except ValueError:
dataflag[i]=0
#uncarea[i]=float(t[2])
i += 1
input.close()
for i in range(len(counts)):
print i, counts[i],unccounts[i],abs(counts[i]/unccounts[i])
self.snr24=zeros(len(self.ra),'f')
self.snr24[self.On24ImageFlag]=counts/unccounts
self.f24NSA=zeros(len(self.ra),'f')
self.f24NSAerr=zeros(len(self.ra),'f')
self.f24NSA[self.On24ImageFlag]=counts
self.f24NSAerr[self.On24ImageFlag]=unccounts
#coordout=atpy.Table(output_coords,type='ascii')
#col=coordout['col1']
#row=coordout['col2']
# convert arrays to numbers
#col=col[0]
#row=row[0]
#figure()
outfile=open(homedir+'research/LocalClusters/NSAmastertables/SNR24/'+self.prefix+'_snr24NSA.dat','w')
for i in range(len(self.f24NSA)):
outfile.write('%7.4e %7.4e %5.2e \n'%(self.f24NSA[i],self.f24NSAerr[i],self.snr24[i]))
outfile.close()
mkw11=cluster('MKW11')
mkw8=cluster('MKW8')
awm4=cluster('AWM4')
a2052=cluster('A2052')
a2063=cluster('A2063')
ngc=cluster('NGC6107')
coma=cluster('Coma')
herc=cluster('Hercules')
a1367=cluster('A1367')
def copy_images_from_coma():
mylocalclusters=[mkw8,awm4,ngc,herc,a1367]
mylocalclusters=[mkw11,mkw8,awm4,a2052,a2063,ngc,coma,herc,a1367]
for cl in mylocalclusters:
s='scp -r coma:research/LocalClusters/Images/'+cl.prefix+'/24umWCS ~/research/LocalClusters/Images/'+cl.prefix+'/.'
# just get apex tables
s='scp -r coma:research/LocalClusters/Images/'+cl.prefix+'/24umWCS/'+cl.prefix+'-WCS-mosaic_extract.tbl ~/research/LocalClusters/Images/'+cl.prefix+'/24umWCS/.'
os.system(s)
def measure_snr():
mylocalclusters=[mkw11,mkw8,awm4,a2052,a2063,ngc,coma,herc,a1367]
#mylocalclusters=[mkw8,awm4,a2052,a2063,ngc,herc,a1367]
for cl in mylocalclusters:
cl.measuresnr24()
measure_snr()
#mkw11.measuresnr24()
| gpl-3.0 | -2,141,867,585,166,676,500 | 35.192513 | 206 | 0.585697 | false |
engdan77/edoAutoHomeMobile | garden/__init__.py | 9 | 35416 | '''
Graph
======
The :class:`Graph` widget is a widget for displaying plots. It supports
drawing multiple plots with different colors on the Graph. It also supports
a title, ticks, labeled ticks, grids and a log or linear representation on
both the x and y axes, independently.
To display a plot, first create a graph which will function as a "canvas" for
the plots. Then create plot objects e.g. MeshLinePlot and add them to the
graph.
To create a graph with x-axis between 0-100, y-axis between -1 and 1, x and y
labels of X and Y, respectively, x major and minor ticks every 25 and 5 units,
respectively, y major ticks every 1 unit, full x and y grids and with
a red line plot containing a sin wave on this range::
from kivy.garden.graph import Graph, MeshLinePlot
graph = Graph(xlabel='X', ylabel='Y', x_ticks_minor=5,
x_ticks_major=25, y_ticks_major=1,
y_grid_label=True, x_grid_label=True, padding=5,
x_grid=True, y_grid=True, xmin=-0, xmax=100, ymin=-1, ymax=1)
plot = MeshLinePlot(color=[1, 0, 0, 1])
plot.points = [(x, sin(x / 10.)) for x in xrange(0, 101)]
graph.add_plot(plot)
The MeshLinePlot plot is a particular plot which draws a set of points using
a mesh object. The points are given as a list of tuples, with each tuple
being a (x, y) coordinate in the graph's units.
You can create different types of plots other than MeshLinePlot by inheriting
from the Plot class and implementing the required functions. The Graph object
provides a "canvas" to which a Plot's instructions are added. The plot object
is responsible for updating these instructions to show within the bounding
box of the graph the proper plot. The Graph notifies the Plot when it needs
to be redrawn due to changes. See the MeshLinePlot class for how it is done.
.. note::
The graph uses a stencil view to clip the plots to the graph display area.
As with the stencil graphics instructions, you cannot stack more than 8
stencil-aware widgets.
'''
__all__ = ('Graph', 'Plot', 'MeshLinePlot', 'MeshStemPlot')
from math import radians
from kivy.uix.widget import Widget
from kivy.uix.label import Label
from kivy.uix.stencilview import StencilView
from kivy.properties import NumericProperty, BooleanProperty,\
BoundedNumericProperty, StringProperty, ListProperty, ObjectProperty,\
DictProperty, AliasProperty
from kivy.clock import Clock
from kivy.graphics import Mesh, Color
from kivy.graphics.transformation import Matrix
from kivy.event import EventDispatcher
from kivy.lang import Builder
from kivy import metrics
from math import log10, floor, ceil
from decimal import Decimal
Builder.load_string('''
#:kivy 1.1.0
<RotateLabel>:
canvas.before:
PushMatrix
MatrixInstruction:
matrix: self.transform
canvas.after:
PopMatrix
''')
class RotateLabel(Label):
transform = ObjectProperty(Matrix())
class Graph(Widget):
'''Graph class, see module documentation for more information.
'''
# triggers a full reload of graphics
_trigger = ObjectProperty(None)
# triggers only a repositioning of objects due to size/pos updates
_trigger_size = ObjectProperty(None)
# holds widget with the x-axis label
_xlabel = ObjectProperty(None)
# holds widget with the y-axis label
_ylabel = ObjectProperty(None)
# holds all the x-axis tick mark labels
_x_grid_label = ListProperty([])
# holds all the y-axis tick mark labels
_y_grid_label = ListProperty([])
    # holds the stencil view that clips the plots to the graph area
_plot_area = ObjectProperty(None)
# the mesh drawing all the ticks/grids
_mesh = ObjectProperty(None)
# the mesh which draws the surrounding rectangle
_mesh_rect = ObjectProperty(None)
    # a list of locations of major and minor ticks. The values are not pixel
    # positions, but are in the axis min - max range
_ticks_majorx = ListProperty([])
_ticks_minorx = ListProperty([])
_ticks_majory = ListProperty([])
_ticks_minory = ListProperty([])
def __init__(self, **kwargs):
super(Graph, self).__init__(**kwargs)
self._mesh = Mesh(mode='lines')
self._mesh_rect = Mesh(mode='line_strip')
val = 0.25
self.canvas.add(Color(1 * val, 1 * val, 1 * val))
self.canvas.add(self._mesh)
self.canvas.add(Color(1, 1, 1))
self.canvas.add(self._mesh_rect)
mesh = self._mesh_rect
mesh.vertices = [0] * (5 * 4)
mesh.indices = [k for k in xrange(5)]
self._plot_area = StencilView()
self.add_widget(self._plot_area)
self._trigger = Clock.create_trigger(self._redraw_all)
self._trigger_size = Clock.create_trigger(self._redraw_size)
self.bind(center=self._trigger_size, padding=self._trigger_size,
font_size=self._trigger_size, plots=self._trigger_size,
x_grid=self._trigger_size, y_grid=self._trigger_size,
draw_border=self._trigger_size)
self.bind(xmin=self._trigger, xmax=self._trigger,
xlog=self._trigger, x_ticks_major=self._trigger,
x_ticks_minor=self._trigger,
xlabel=self._trigger, x_grid_label=self._trigger,
ymin=self._trigger, ymax=self._trigger,
ylog=self._trigger, y_ticks_major=self._trigger,
y_ticks_minor=self._trigger,
ylabel=self._trigger, y_grid_label=self._trigger)
self._trigger()
def _get_ticks(self, major, minor, log, s_min, s_max):
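        # Computes the tick locations for one axis and returns the pair
        # (points_major, points_minor). The values are in axis data units,
        # or log10 of the data values when `log` is True. `major` is the
        # spacing between major ticks and `minor` is the number of
        # sub-intervals each major interval is divided into; both lists are
        # empty when `major` is zero or the range is empty.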
if major and s_max > s_min:
if log:
s_min = log10(s_min)
s_max = log10(s_max)
# count the decades in min - max. This is in actual decades,
# not logs.
n_decades = floor(s_max - s_min)
# for the fractional part of the last decade, we need to
# convert the log value, x, to 10**x but need to handle
# differently if the last incomplete decade has a decade
# boundary in it
if floor(s_min + n_decades) != floor(s_max):
n_decades += 1 - (10 ** (s_min + n_decades + 1) - 10 **
s_max) / 10 ** floor(s_max + 1)
else:
n_decades += ((10 ** s_max - 10 ** (s_min + n_decades)) /
10 ** floor(s_max + 1))
# this might be larger than what is needed, but we delete
# excess later
n_ticks_major = n_decades / float(major)
n_ticks = int(floor(n_ticks_major * (minor if minor >=
1. else 1.0))) + 2
# in decade multiples, e.g. 0.1 of the decade, the distance
# between ticks
decade_dist = major / float(minor if minor else 1.0)
points_minor = [0] * n_ticks
points_major = [0] * n_ticks
k = 0 # position in points major
k2 = 0 # position in points minor
# because each decade is missing 0.1 of the decade, if a tick
# falls in < min_pos skip it
min_pos = 0.1 - 0.00001 * decade_dist
s_min_low = floor(s_min)
# first real tick location. value is in fractions of decades
# from the start we have to use decimals here, otherwise
# floating point inaccuracies results in bad values
start_dec = ceil((10 ** Decimal(s_min - s_min_low - 1)) /
Decimal(decade_dist)) * decade_dist
count_min = (0 if not minor else
floor(start_dec / decade_dist) % minor)
start_dec += s_min_low
count = 0 # number of ticks we currently have passed start
while True:
# this is the current position in decade that we are.
# e.g. -0.9 means that we're at 0.1 of the 10**ceil(-0.9)
# decade
pos_dec = start_dec + decade_dist * count
pos_dec_low = floor(pos_dec)
diff = pos_dec - pos_dec_low
zero = abs(diff) < 0.001 * decade_dist
if zero:
# the same value as pos_dec but in log scale
pos_log = pos_dec_low
else:
pos_log = log10((pos_dec - pos_dec_low
) * 10 ** ceil(pos_dec))
if pos_log > s_max:
break
count += 1
if zero or diff >= min_pos:
if minor and not count_min % minor:
points_major[k] = pos_log
k += 1
else:
points_minor[k2] = pos_log
k2 += 1
count_min += 1
#n_ticks = len(points)
else:
# distance between each tick
tick_dist = major / float(minor if minor else 1.0)
n_ticks = int(floor((s_max - s_min) / tick_dist) + 1)
points_major = [0] * int(floor((s_max - s_min) / float(major))
+ 1)
points_minor = [0] * (n_ticks - len(points_major) + 1)
k = 0 # position in points major
k2 = 0 # position in points minor
for m in xrange(0, n_ticks):
if minor and m % minor:
points_minor[k2] = m * tick_dist + s_min
k2 += 1
else:
points_major[k] = m * tick_dist + s_min
k += 1
del points_major[k:]
del points_minor[k2:]
else:
points_major = []
points_minor = []
return points_major, points_minor
def _update_labels(self):
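        # lay out the axis titles and tick-mark labels for the current
        # size/pos and return the (x0, y0, x1, y1) bounding box that remains
        # for the plot area (consumed by _redraw_size)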
xlabel = self._xlabel
ylabel = self._ylabel
x = self.x
y = self.y
width = self.width
height = self.height
padding = self.padding
x_next = padding + x
y_next = padding + y
xextent = x + width
yextent = y + height
ymin = self.ymin
ymax = self.ymax
xmin = self.xmin
precision = self.precision
x_overlap = False
y_overlap = False
# set up x and y axis labels
if xlabel:
xlabel.text = self.xlabel
xlabel.texture_update()
xlabel.size = xlabel.texture_size
xlabel.pos = (x + width / 2. - xlabel.width / 2., padding + y)
y_next += padding + xlabel.height
if ylabel:
ylabel.text = self.ylabel
ylabel.texture_update()
ylabel.size = ylabel.texture_size
ylabel.x = padding + x - (ylabel.width / 2. - ylabel.height / 2.)
x_next += padding + ylabel.height
xpoints = self._ticks_majorx
xlabels = self._x_grid_label
xlabel_grid = self.x_grid_label
ylabel_grid = self.y_grid_label
ypoints = self._ticks_majory
ylabels = self._y_grid_label
# now x and y tick mark labels
if len(ylabels) and ylabel_grid:
# horizontal size of the largest tick label, to have enough room
ylabels[0].text = precision % ypoints[0]
ylabels[0].texture_update()
y1 = ylabels[0].texture_size
y_start = y_next + (padding + y1[1] if len(xlabels) and xlabel_grid
else 0) +\
(padding + y1[1] if not y_next else 0)
yextent = y + height - padding - y1[1] / 2.
if self.ylog:
ymax = log10(ymax)
ymin = log10(ymin)
ratio = (yextent - y_start) / float(ymax - ymin)
y_start -= y1[1] / 2.
func = (lambda x: 10 ** x) if self.ylog else lambda x: x
y1 = y1[0]
for k in xrange(len(ylabels)):
ylabels[k].text = precision % func(ypoints[k])
ylabels[k].texture_update()
ylabels[k].size = ylabels[k].texture_size
y1 = max(y1, ylabels[k].texture_size[0])
ylabels[k].pos = (x_next, y_start + (ypoints[k] - ymin) *
ratio)
if len(ylabels) > 1 and ylabels[0].top > ylabels[1].y:
y_overlap = True
else:
x_next += y1 + padding
if len(xlabels) and xlabel_grid:
func = log10 if self.xlog else lambda x: x
# find the distance from the end that'll fit the last tick label
xlabels[0].text = precision % func(xpoints[-1])
xlabels[0].texture_update()
xextent = x + width - xlabels[0].texture_size[0] / 2. - padding
# find the distance from the start that'll fit the first tick label
if not x_next:
xlabels[0].text = precision % func(xpoints[0])
xlabels[0].texture_update()
x_next = padding + xlabels[0].texture_size[0] / 2.
xmin = func(xmin)
ratio = (xextent - x_next) / float(func(self.xmax) - xmin)
func = (lambda x: 10 ** x) if self.xlog else lambda x: x
right = -1
for k in xrange(len(xlabels)):
xlabels[k].text = precision % func(xpoints[k])
# update the size so we can center the labels on ticks
xlabels[k].texture_update()
xlabels[k].size = xlabels[k].texture_size
xlabels[k].pos = (x_next + (xpoints[k] - xmin) * ratio -
xlabels[k].texture_size[0] / 2., y_next)
if xlabels[k].x < right:
x_overlap = True
break
right = xlabels[k].right
if not x_overlap:
y_next += padding + xlabels[0].texture_size[1]
# now re-center the x and y axis labels
if xlabel:
xlabel.x = x_next + (xextent - x_next) / 2. - xlabel.width / 2.
if ylabel:
ylabel.y = y_next + (yextent - y_next) / 2. - ylabel.height / 2.
t = Matrix().translate(ylabel.center[0], ylabel.center[1], 0)
t = t.multiply(Matrix().rotate(-radians(270), 0, 0, 1))
ylabel.transform = t.multiply(Matrix().translate(-ylabel.center[0],
-ylabel.center[1],
0))
if x_overlap:
for k in xrange(len(xlabels)):
xlabels[k].text = ''
if y_overlap:
for k in xrange(len(ylabels)):
ylabels[k].text = ''
return x_next, y_next, xextent, yextent
def _update_ticks(self, size):
# re-compute the positions of the bounding rectangle
mesh = self._mesh_rect
vert = mesh.vertices
if self.draw_border:
vert[0] = size[0]
vert[1] = size[1]
vert[4] = size[2]
vert[5] = size[1]
vert[8] = size[2]
vert[9] = size[3]
vert[12] = size[0]
vert[13] = size[3]
vert[16] = size[0]
vert[17] = size[1]
else:
vert[0:18] = [0 for k in xrange(18)]
mesh.vertices = vert
# re-compute the positions of the x/y axis ticks
mesh = self._mesh
vert = mesh.vertices
start = 0
xpoints = self._ticks_majorx
ypoints = self._ticks_majory
ylog = self.ylog
xlog = self.xlog
xmin = self.xmin
xmax = self.xmax
if xlog:
xmin = log10(xmin)
xmax = log10(xmax)
ymin = self.ymin
ymax = self.ymax
if ylog:
            ymin = log10(ymin)
ymax = log10(ymax)
if len(xpoints):
top = size[3] if self.x_grid else metrics.dp(12) + size[1]
ratio = (size[2] - size[0]) / float(xmax - xmin)
for k in xrange(start, len(xpoints) + start):
vert[k * 8] = size[0] + (xpoints[k - start] - xmin) * ratio
vert[k * 8 + 1] = size[1]
vert[k * 8 + 4] = vert[k * 8]
vert[k * 8 + 5] = top
start += len(xpoints)
if len(ypoints):
top = size[2] if self.y_grid else metrics.dp(12) + size[0]
ratio = (size[3] - size[1]) / float(ymax - ymin)
for k in xrange(start, len(ypoints) + start):
vert[k * 8 + 1] = size[1] + (ypoints[k - start] - ymin) * ratio
vert[k * 8 + 5] = vert[k * 8 + 1]
vert[k * 8] = size[0]
vert[k * 8 + 4] = top
mesh.vertices = vert
def _update_plots(self, size):
ylog = self.ylog
xlog = self.xlog
xmin = self.xmin
xmax = self.xmax
ymin = self.ymin
ymax = self.ymax
for plot in self.plots:
plot._update(xlog, xmin, xmax, ylog, ymin, ymax, size)
def _redraw_all(self, *args):
# add/remove all the required labels
font_size = self.font_size
if self.xlabel:
if not self._xlabel:
xlabel = Label(font_size=font_size)
self.add_widget(xlabel)
self._xlabel = xlabel
else:
xlabel = self._xlabel
if xlabel:
self.remove_widget(xlabel)
self._xlabel = None
grids = self._x_grid_label
xpoints_major, xpoints_minor = self._get_ticks(self.x_ticks_major,
self.x_ticks_minor,
self.xlog, self.xmin,
self.xmax)
self._ticks_majorx = xpoints_major
self._ticks_minorx = xpoints_minor
if not self.x_grid_label:
n_labels = 0
else:
n_labels = len(xpoints_major)
for k in xrange(n_labels, len(grids)):
self.remove_widget(grids[k])
del grids[n_labels:]
grid_len = len(grids)
grids.extend([None] * (n_labels - len(grids)))
for k in xrange(grid_len, n_labels):
grids[k] = Label(font_size=font_size)
self.add_widget(grids[k])
if self.ylabel:
if not self._ylabel:
ylabel = RotateLabel(font_size=font_size)
self.add_widget(ylabel)
self._ylabel = ylabel
else:
ylabel = self._ylabel
if ylabel:
self.remove_widget(ylabel)
self._ylabel = None
grids = self._y_grid_label
ypoints_major, ypoints_minor = self._get_ticks(self.y_ticks_major,
self.y_ticks_minor,
self.ylog, self.ymin,
self.ymax)
self._ticks_majory = ypoints_major
self._ticks_minory = ypoints_minor
if not self.y_grid_label:
n_labels = 0
else:
n_labels = len(ypoints_major)
for k in xrange(n_labels, len(grids)):
self.remove_widget(grids[k])
del grids[n_labels:]
grid_len = len(grids)
grids.extend([None] * (n_labels - len(grids)))
for k in xrange(grid_len, n_labels):
grids[k] = Label(font_size=font_size)
self.add_widget(grids[k])
mesh = self._mesh
n_points = (len(xpoints_major) + len(xpoints_minor) +
len(ypoints_major) + len(ypoints_minor))
mesh.vertices = [0] * (n_points * 8)
mesh.indices = [k for k in xrange(n_points * 2)]
self._redraw_size()
def _redraw_size(self, *args):
# size a 4-tuple describing the bounding box in which we can draw
# graphs, it's (x0, y0, x1, y1), which correspond with the bottom left
# and top right corner locations, respectively
size = self._update_labels()
self._plot_area.pos = (size[0], size[1])
self._plot_area.size = (size[2] - size[0], size[3] - size[1])
self._update_ticks(size)
self._update_plots(size)
def add_plot(self, plot):
'''Add a new plot to this graph.
:Parameters:
`plot`:
Plot to add to this graph.
>>> graph = Graph()
>>> plot = MeshLinePlot(mode='line_strip', color=[1, 0, 0, 1])
>>> plot.points = [(x / 10., sin(x / 50.)) for x in xrange(-0, 101)]
>>> graph.add_plot(plot)
'''
area = self._plot_area
for group in plot._get_drawings():
area.canvas.add(group)
self.plots = self.plots + [plot]
def remove_plot(self, plot):
'''Remove a plot from this graph.
:Parameters:
`plot`:
Plot to remove from this graph.
>>> graph = Graph()
>>> plot = MeshLinePlot(mode='line_strip', color=[1, 0, 0, 1])
>>> plot.points = [(x / 10., sin(x / 50.)) for x in xrange(-0, 101)]
>>> graph.add_plot(plot)
>>> graph.remove_plot(plot)
'''
self._plot_area.canvas.remove_group(plot._get_group())
self.plots.remove(plot)
xmin = NumericProperty(0.)
'''The x-axis minimum value.
If :data:`xlog` is True, xmin must be larger than zero.
:data:`xmin` is a :class:`~kivy.properties.NumericProperty`, defaults to 0.
'''
xmax = NumericProperty(100.)
'''The x-axis maximum value, larger than xmin.
    :data:`xmax` is a :class:`~kivy.properties.NumericProperty`, defaults to 100.
'''
xlog = BooleanProperty(False)
'''Determines whether the x-axis should be displayed logarithmically (True)
or linearly (False).
:data:`xlog` is a :class:`~kivy.properties.BooleanProperty`, defaults
to False.
'''
x_ticks_major = BoundedNumericProperty(0, min=0)
'''Distance between major tick marks on the x-axis.
Determines the distance between the major tick marks. Major tick marks
start from min and re-occur at every ticks_major until :data:`xmax`.
    If :data:`xmax` doesn't overlap with an integer multiple of ticks_major,
no tick will occur at :data:`xmax`. Zero indicates no tick marks.
If :data:`xlog` is true, then this indicates the distance between ticks
in multiples of current decade. E.g. if :data:`xmin` is 0.1 and
ticks_major is 0.1, it means there will be a tick at every 10th of the
decade, i.e. 0.1 ... 0.9, 1, 2... If it is 0.3, the ticks will occur at
0.1, 0.3, 0.6, 0.9, 2, 5, 8, 10. You'll notice that it went from 8 to 10
instead of to 20, that's so that we can say 0.5 and have ticks at every
half decade, e.g. 0.1, 0.5, 1, 5, 10, 50... Similarly, if ticks_major is
1.5, there will be ticks at 0.1, 5, 100, 5,000... Also notice, that there's
always a major tick at the start. Finally, if e.g. :data:`xmin` is 0.6
and this 0.5 there will be ticks at 0.6, 1, 5...
:data:`x_ticks_major` is a
:class:`~kivy.properties.BoundedNumericProperty`, defaults to 0.
'''
x_ticks_minor = BoundedNumericProperty(0, min=0)
'''The number of sub-intervals that divide x_ticks_major.
Determines the number of sub-intervals into which ticks_major is divided,
if non-zero. The actual number of minor ticks between the major ticks is
ticks_minor - 1. Only used if ticks_major is non-zero. If there's no major
tick at xmax then the number of minor ticks after the last major
tick will be however many ticks fit until xmax.
If self.xlog is true, then this indicates the number of intervals the
    distance between major ticks is divided into. The result is the number of
multiples of decades between ticks. I.e. if ticks_minor is 10, then if
ticks_major is 1, there will be ticks at 0.1, 0.2...0.9, 1, 2, 3... If
ticks_major is 0.3, ticks will occur at 0.1, 0.12, 0.15, 0.18... Finally,
as is common, if ticks major is 1, and ticks minor is 5, there will be
ticks at 0.1, 0.2, 0.4... 0.8, 1, 2...
:data:`x_ticks_minor` is a
:class:`~kivy.properties.BoundedNumericProperty`, defaults to 0.
'''
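    # For example, a log x-axis spanning 0.1 to 100 with one major tick per
    # decade and five minor sub-intervals per major interval (the resulting
    # tick positions are described in the two docstrings above) could be
    # configured as:
    #   Graph(xlog=True, xmin=0.1, xmax=100, x_ticks_major=1, x_ticks_minor=5)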
x_grid = BooleanProperty(False)
'''Determines whether the x-axis has tick marks or a full grid.
If :data:`x_ticks_major` is non-zero, then if x_grid is False tick marks
will be displayed at every major tick. If x_grid is True, instead of ticks,
a vertical line will be displayed at every major tick.
:data:`x_grid` is a :class:`~kivy.properties.BooleanProperty`, defaults
to False.
'''
x_grid_label = BooleanProperty(False)
'''Whether labels should be displayed beneath each major tick. If true,
each major tick will have a label containing the axis value.
:data:`x_grid_label` is a :class:`~kivy.properties.BooleanProperty`,
defaults to False.
'''
xlabel = StringProperty('')
'''The label for the x-axis. If not empty it is displayed in the center of
the axis.
:data:`xlabel` is a :class:`~kivy.properties.StringProperty`,
defaults to ''.
'''
ymin = NumericProperty(0.)
'''The y-axis minimum value.
If :data:`ylog` is True, ymin must be larger than zero.
:data:`ymin` is a :class:`~kivy.properties.NumericProperty`, defaults to 0.
'''
ymax = NumericProperty(100.)
'''The y-axis maximum value, larger than ymin.
    :data:`ymax` is a :class:`~kivy.properties.NumericProperty`, defaults to 100.
'''
ylog = BooleanProperty(False)
'''Determines whether the y-axis should be displayed logarithmically (True)
or linearly (False).
:data:`ylog` is a :class:`~kivy.properties.BooleanProperty`, defaults
to False.
'''
y_ticks_major = BoundedNumericProperty(0, min=0)
'''Distance between major tick marks. See :data:`x_ticks_major`.
:data:`y_ticks_major` is a
:class:`~kivy.properties.BoundedNumericProperty`, defaults to 0.
'''
y_ticks_minor = BoundedNumericProperty(0, min=0)
'''The number of sub-intervals that divide ticks_major.
See :data:`x_ticks_minor`.
:data:`y_ticks_minor` is a
:class:`~kivy.properties.BoundedNumericProperty`, defaults to 0.
'''
y_grid = BooleanProperty(False)
'''Determines whether the y-axis has tick marks or a full grid. See
:data:`x_grid`.
:data:`y_grid` is a :class:`~kivy.properties.BooleanProperty`, defaults
to False.
'''
y_grid_label = BooleanProperty(False)
'''Whether labels should be displayed beneath each major tick. If true,
each major tick will have a label containing the axis value.
:data:`y_grid_label` is a :class:`~kivy.properties.BooleanProperty`,
defaults to False.
'''
ylabel = StringProperty('')
'''The label for the y-axis. If not empty it is displayed in the center of
the axis.
:data:`ylabel` is a :class:`~kivy.properties.StringProperty`,
defaults to ''.
'''
padding = NumericProperty('5dp')
    '''Padding distances between the labels, titles and graph, as well as between
the widget and the objects near the boundaries.
:data:`padding` is a :class:`~kivy.properties.NumericProperty`, defaults
to 5dp.
'''
font_size = NumericProperty('15sp')
'''Font size of the labels.
:data:`font_size` is a :class:`~kivy.properties.NumericProperty`, defaults
to 15sp.
'''
precision = StringProperty('%g')
'''Determines the numerical precision of the tick mark labels. This value
governs how the numbers are converted into string representation. Accepted
values are those listed in Python's manual in the
"String Formatting Operations" section.
:data:`precision` is a :class:`~kivy.properties.StringProperty`, defaults
to '%g'.
'''
draw_border = BooleanProperty(True)
'''Whether a border is drawn around the canvas of the graph where the
plots are displayed.
:data:`draw_border` is a :class:`~kivy.properties.BooleanProperty`,
defaults to True.
'''
plots = ListProperty([])
'''Holds a list of all the plots in the graph. To add and remove plots
    from the graph use :data:`add_plot` and :data:`remove_plot`. Do not
    directly edit this list.
:data:`plots` is a :class:`~kivy.properties.ListProperty`,
defaults to [].
'''
class Plot(EventDispatcher):
'''Plot class, see module documentation for more information.
'''
# this function is called by graph whenever any of the parameters
# change. The plot should be recalculated then.
# log, min, max indicate the axis settings.
    # size is a 4-tuple describing the bounding box in which we can draw
# graphs, it's (x0, y0, x1, y1), which correspond with the bottom left
# and top right corner locations, respectively.
def _update(self, xlog, xmin, xmax, ylog, ymin, ymax, size):
pass
# returns a string which is unique and is the group name given to all the
# instructions returned by _get_drawings. Graph uses this to remove
# these instructions when needed.
def _get_group(self):
return ''
# returns a list of canvas instructions that will be added to the graph's
# canvas. These instructions must belong to a group as described
# in _get_group.
def _get_drawings(self):
return []
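# Illustrative sketch (not part of the original module): the smallest Plot
# subclass that honours the _update/_get_group/_get_drawings contract above.
# It draws nothing; MeshLinePlot below shows a real implementation.
class NullPlot(Plot):
    def _get_group(self):
        return 'NullPlot%d' % id(self)
    def _get_drawings(self):
        # a real plot would return canvas instructions (e.g. a Color and a Mesh)
        return []
    def _update(self, xlog, xmin, xmax, ylog, ymin, ymax, size):
        # a real plot would rescale its vertices to the new bounds and size here
        pass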
class MeshLinePlot(Plot):
'''MeshLinePlot class which displays a set of points similar to a mesh.
'''
# mesh which forms the plot
_mesh = ObjectProperty(None)
# color of the plot
_color = ObjectProperty(None)
_trigger = ObjectProperty(None)
# most recent values of the params used to draw the plot
_params = DictProperty({'xlog': False, 'xmin': 0, 'xmax': 100,
'ylog': False, 'ymin': 0, 'ymax': 100,
'size': (0, 0, 0, 0)})
def __init__(self, **kwargs):
self._color = Color(1, 1, 1, group='LinePlot%d' % id(self))
self._mesh = Mesh(mode='line_strip', group='LinePlot%d' % id(self))
super(MeshLinePlot, self).__init__(**kwargs)
self._trigger = Clock.create_trigger(self._redraw)
self.bind(_params=self._trigger, points=self._trigger)
def _update(self, xlog, xmin, xmax, ylog, ymin, ymax, size):
self._params = {'xlog': xlog, 'xmin': xmin, 'xmax': xmax, 'ylog': ylog,
'ymin': ymin, 'ymax': ymax, 'size': size}
def _redraw(self, *args):
points = self.points
mesh = self._mesh
vert = mesh.vertices
ind = mesh.indices
params = self._params
funcx = log10 if params['xlog'] else lambda x: x
funcy = log10 if params['ylog'] else lambda x: x
xmin = funcx(params['xmin'])
ymin = funcy(params['ymin'])
diff = len(points) - len(vert) / 4
size = params['size']
ratiox = (size[2] - size[0]) / float(funcx(params['xmax']) - xmin)
ratioy = (size[3] - size[1]) / float(funcy(params['ymax']) - ymin)
if diff < 0:
del vert[4 * len(points):]
del ind[len(points):]
elif diff > 0:
ind.extend(xrange(len(ind), len(ind) + diff))
vert.extend([0] * (diff * 4))
for k in xrange(len(points)):
vert[k * 4] = (funcx(points[k][0]) - xmin) * ratiox + size[0]
vert[k * 4 + 1] = (funcy(points[k][1]) - ymin) * ratioy + size[1]
mesh.vertices = vert
def _get_group(self):
return 'LinePlot%d' % id(self)
def _get_drawings(self):
return [self._color, self._mesh]
def _set_mode(self, value):
self._mesh.mode = value
mode = AliasProperty(lambda self: self._mesh.mode, _set_mode)
'''VBO Mode used for drawing the points. Can be one of: 'points',
'line_strip', 'line_loop', 'lines', 'triangle_strip', 'triangle_fan'.
See :class:`~kivy.graphics.Mesh` for more details.
Defaults to 'line_strip'.
'''
def _set_color(self, value):
self._color.rgba = value
color = AliasProperty(lambda self: self._color.rgba, _set_color)
'''Plot color, in the format [r, g, b, a] with values between 0-1.
Defaults to [1, 1, 1, 1].
'''
points = ListProperty([])
'''List of x, y points to be displayed in the plot.
The elements of points are 2-tuples, (x, y). The points are displayed
based on the mode setting.
:data:`points` is a :class:`~kivy.properties.ListProperty`, defaults to
[].
'''
class MeshStemPlot(MeshLinePlot):
'''MeshStemPlot uses the MeshLinePlot class to draw a stem plot. The data
provided is graphed from origin to the data point.
'''
def _redraw(self, *args):
points = self.points
mesh = self._mesh
self._mesh.mode = 'lines'
vert = mesh.vertices
ind = mesh.indices
params = self._params
funcx = log10 if params['xlog'] else lambda x: x
funcy = log10 if params['ylog'] else lambda x: x
xmin = funcx(params['xmin'])
ymin = funcy(params['ymin'])
diff = len(points) * 2 - len(vert) / 4
size = params['size']
ratiox = (size[2] - size[0]) / float(funcx(params['xmax']) - xmin)
ratioy = (size[3] - size[1]) / float(funcy(params['ymax']) - ymin)
        if diff < 0:
            # each point contributes two vertices (stem base and tip), so keep
            # eight floats and two indices per remaining point
            del vert[8 * len(points):]
            del ind[2 * len(points):]
elif diff > 0:
ind.extend(xrange(len(ind), len(ind) + diff))
vert.extend([0] * (diff * 4))
for k in xrange(len(points)):
vert[k * 8] = (funcx(points[k][0]) - xmin) * ratiox + size[0]
vert[k * 8 + 1] = (0 - ymin) * ratioy + size[1]
vert[k * 8 + 4] = (funcx(points[k][0]) - xmin) * ratiox + size[0]
vert[k * 8 + 5] = (funcy(points[k][1]) - ymin) * ratioy + size[1]
mesh.vertices = vert
if __name__ == '__main__':
from math import sin, cos
from kivy.app import App
class TestApp(App):
def build(self):
graph = Graph(xlabel='Cheese', ylabel='Apples', x_ticks_minor=5,
x_ticks_major=25, y_ticks_major=1,
y_grid_label=True, x_grid_label=True, padding=5,
xlog=False, ylog=False, x_grid=True, y_grid=True,
xmin=-50, xmax=50, ymin=-1, ymax=1)
plot = MeshLinePlot(color=[1, 0, 0, 1])
plot.points = [(x / 10., sin(x / 50.)) for x in xrange(-500, 501)]
graph.add_plot(plot)
plot = MeshLinePlot(color=[0, 1, 0, 1])
plot.points = [(x / 10., cos(x / 50.)) for x in xrange(-600, 501)]
graph.add_plot(plot)
plot = MeshLinePlot(color=[0, 0, 1, 1])
graph.add_plot(plot)
plot.points = [(x, x / 50.) for x in xrange(-50, 51)]
return graph
TestApp().run()
| mit | 1,045,187,973,886,983,200 | 38.57095 | 79 | 0.555399 | false |
ostash/qt-creator-i18n-uk | tests/system/suite_qtquick/tst_qtquick_creation/test.py | 7 | 2111 | source("../../shared/qtcreator.py")
workingDir = None
def main():
global workingDir
startApplication("qtcreator" + SettingsPath)
    # using a temporary directory won't mess up any already existing files
workingDir = tempDir()
projectName = createNewQtQuickApplication(workingDir, targets = QtQuickConstants.Targets.DESKTOP)
# wait for parsing to complete
waitForSignal("{type='CppTools::Internal::CppModelManager' unnamed='1'}", "sourceFilesRefreshed(QStringList)")
test.log("Building project")
result = modifyRunSettingsForHookInto(projectName, 11223)
invokeMenuItem("Build", "Build All")
waitForSignal("{type='ProjectExplorer::BuildManager' unnamed='1'}", "buildQueueFinished(bool)")
if not checkCompile():
test.fatal("Compile failed")
else:
checkLastBuild()
test.log("Running project (includes build)")
if result:
result = addExecutableAsAttachableAUT(projectName, 11223)
allowAppThroughWinFW(workingDir, projectName)
if result:
result = runAndCloseApp(True, projectName, 11223, "subprocessFunction", SubprocessType.QT_QUICK_APPLICATION)
else:
result = runAndCloseApp(sType=SubprocessType.QT_QUICK_APPLICATION)
removeExecutableAsAttachableAUT(projectName, 11223)
deleteAppFromWinFW(workingDir, projectName)
else:
result = runAndCloseApp()
if result:
logApplicationOutput()
invokeMenuItem("File", "Exit")
def subprocessFunction():
helloWorldText = waitForObject("{container={type='QmlApplicationViewer' visible='1' unnamed='1'} "
"enabled='true' text='Hello World' type='Text' unnamed='1' visible='true'}")
test.log("Clicking 'Hello World' Text to close QmlApplicationViewer")
mouseClick(helloWorldText, 5, 5, 0, Qt.LeftButton)
def cleanup():
global workingDir
# waiting for a clean exit - for a full-remove of the temp directory
waitForCleanShutdown()
if workingDir != None:
deleteDirIfExists(workingDir)
| lgpl-2.1 | 7,007,054,503,512,343,000 | 41.22 | 124 | 0.678351 | false |
Microsoft/PTVS | Python/Product/Miniconda/Miniconda3-x64/Lib/site-packages/win32com/client/__init__.py | 5 | 22594 | # This module exists to create the "best" dispatch object for a given
# object. If "makepy" support for a given object is detected, it is
# used, otherwise a dynamic dispatch object.
# Note that if the unknown dispatch object then returns a known
# dispatch object, the known class will be used. This contrasts
# with dynamic.Dispatch behaviour, where dynamic objects are always used.
import pythoncom
from . import dynamic
from . import gencache
import sys
import pywintypes
_PyIDispatchType = pythoncom.TypeIIDs[pythoncom.IID_IDispatch]
def __WrapDispatch(dispatch, userName = None, resultCLSID = None, typeinfo = None, \
UnicodeToString=None, clsctx = pythoncom.CLSCTX_SERVER,
WrapperClass = None):
"""
Helper function to return a makepy generated class for a CLSID if it exists,
otherwise cope by using CDispatch.
"""
assert UnicodeToString is None, "this is deprecated and will go away"
if resultCLSID is None:
try:
typeinfo = dispatch.GetTypeInfo()
if typeinfo is not None: # Some objects return NULL, some raise exceptions...
resultCLSID = str(typeinfo.GetTypeAttr()[0])
except (pythoncom.com_error, AttributeError):
pass
if resultCLSID is not None:
from . import gencache
# Attempt to load generated module support
# This may load the module, and make it available
klass = gencache.GetClassForCLSID(resultCLSID)
if klass is not None:
return klass(dispatch)
# Return a "dynamic" object - best we can do!
if WrapperClass is None: WrapperClass = CDispatch
return dynamic.Dispatch(dispatch, userName, WrapperClass, typeinfo, clsctx=clsctx)
def GetObject(Pathname = None, Class = None, clsctx = None):
"""
Mimic VB's GetObject() function.
ob = GetObject(Class = "ProgID") or GetObject(Class = clsid) will
connect to an already running instance of the COM object.
ob = GetObject(r"c:\blah\blah\foo.xls") (aka the COM moniker syntax)
will return a ready to use Python wrapping of the required COM object.
    Note: You must specify one or the other of these arguments. I know
this isn't pretty, but it is what VB does. Blech. If you don't
I'll throw ValueError at you. :)
This will most likely throw pythoncom.com_error if anything fails.
"""
if clsctx is None:
clsctx = pythoncom.CLSCTX_ALL
if (Pathname is None and Class is None) or \
(Pathname is not None and Class is not None):
raise ValueError("You must specify a value for Pathname or Class, but not both.")
if Class is not None:
return GetActiveObject(Class, clsctx)
else:
return Moniker(Pathname, clsctx)
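# Usage sketch (the ProgID and the file path below are placeholders and are not
# guaranteed to exist on a given machine):
#   xl = GetObject(Class="Excel.Application")   # attach to a running instance
#   wb = GetObject(r"c:\temp\book1.xls")        # bind through a file moniker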
def GetActiveObject(Class, clsctx = pythoncom.CLSCTX_ALL):
"""
Python friendly version of GetObject's ProgID/CLSID functionality.
"""
resultCLSID = pywintypes.IID(Class)
dispatch = pythoncom.GetActiveObject(resultCLSID)
dispatch = dispatch.QueryInterface(pythoncom.IID_IDispatch)
return __WrapDispatch(dispatch, Class, resultCLSID = resultCLSID, clsctx = clsctx)
def Moniker(Pathname, clsctx = pythoncom.CLSCTX_ALL):
"""
Python friendly version of GetObject's moniker functionality.
"""
moniker, i, bindCtx = pythoncom.MkParseDisplayName(Pathname)
dispatch = moniker.BindToObject(bindCtx, None, pythoncom.IID_IDispatch)
return __WrapDispatch(dispatch, Pathname, clsctx=clsctx)
def Dispatch(dispatch, userName = None, resultCLSID = None, typeinfo = None, UnicodeToString=None, clsctx = pythoncom.CLSCTX_SERVER):
"""Creates a Dispatch based COM object.
"""
assert UnicodeToString is None, "this is deprecated and will go away"
dispatch, userName = dynamic._GetGoodDispatchAndUserName(dispatch,userName,clsctx)
return __WrapDispatch(dispatch, userName, resultCLSID, typeinfo, clsctx=clsctx)
def DispatchEx(clsid, machine=None, userName = None, resultCLSID = None, typeinfo = None, UnicodeToString=None, clsctx = None):
"""Creates a Dispatch based COM object on a specific machine.
"""
assert UnicodeToString is None, "this is deprecated and will go away"
# If InProc is registered, DCOM will use it regardless of the machine name
# (and regardless of the DCOM config for the object.) So unless the user
# specifies otherwise, we exclude inproc apps when a remote machine is used.
if clsctx is None:
clsctx = pythoncom.CLSCTX_SERVER
if machine is not None: clsctx = clsctx & ~pythoncom.CLSCTX_INPROC
if machine is None:
serverInfo = None
else:
serverInfo = (machine,)
if userName is None: userName = clsid
dispatch = pythoncom.CoCreateInstanceEx(clsid, None, clsctx, serverInfo, (pythoncom.IID_IDispatch,))[0]
return Dispatch(dispatch, userName, resultCLSID, typeinfo, clsctx=clsctx)
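# Usage sketch (the machine name and ProgID are placeholders):
#   word = DispatchEx("Word.Application", machine="SOMEHOST")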
class CDispatch(dynamic.CDispatch):
"""
The dynamic class used as a last resort.
The purpose of this overriding of dynamic.CDispatch is to perpetuate the policy
of using the makepy generated wrapper Python class instead of dynamic.CDispatch
if/when possible.
"""
def _wrap_dispatch_(self, ob, userName = None, returnCLSID = None, UnicodeToString=None):
assert UnicodeToString is None, "this is deprecated and will go away"
return Dispatch(ob, userName, returnCLSID,None)
def CastTo(ob, target, typelib = None):
"""'Cast' a COM object to another interface"""
# todo - should support target being an IID
mod = None
if typelib is not None: # caller specified target typelib (TypelibSpec). See e.g. selecttlb.EnumTlbs().
mod = gencache.MakeModuleForTypelib(typelib.clsid, typelib.lcid, int(typelib.major, 16), int(typelib.minor, 16))
if not hasattr(mod, target):
raise ValueError("The interface name '%s' does not appear in the " \
"specified library %r" % (target, typelib.ver_desc))
elif hasattr(target, "index"): # string like
# for now, we assume makepy for this to work.
if "CLSID" not in ob.__class__.__dict__:
# Eeek - no makepy support - try and build it.
ob = gencache.EnsureDispatch(ob)
if "CLSID" not in ob.__class__.__dict__:
raise ValueError("Must be a makepy-able object for this to work")
clsid = ob.CLSID
# Lots of hoops to support "demand-build" - ie, generating
# code for an interface first time it is used. We assume the
# interface name exists in the same library as the object.
# This is generally the case - only referenced typelibs may be
# a problem, and we can handle that later. Maybe <wink>
# So get the generated module for the library itself, then
# find the interface CLSID there.
mod = gencache.GetModuleForCLSID(clsid)
# Get the 'root' module.
mod = gencache.GetModuleForTypelib(mod.CLSID, mod.LCID,
mod.MajorVersion, mod.MinorVersion)
# Find the CLSID of the target
target_clsid = mod.NamesToIIDMap.get(target)
if target_clsid is None:
raise ValueError("The interface name '%s' does not appear in the " \
"same library as object '%r'" % (target, ob))
mod = gencache.GetModuleForCLSID(target_clsid)
if mod is not None:
target_class = getattr(mod, target)
# resolve coclass to interface
target_class = getattr(target_class, "default_interface", target_class)
return target_class(ob) # auto QI magic happens
raise ValueError
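# Usage sketch (the interface name is hypothetical; it must be defined in the
# same generated type library as the object being cast):
#   ob = Dispatch("Some.Application")
#   other = CastTo(ob, "ISomeOtherInterface")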
class Constants:
"""A container for generated COM constants.
"""
def __init__(self):
self.__dicts__ = [] # A list of dictionaries
def __getattr__(self, a):
for d in self.__dicts__:
if a in d:
return d[a]
raise AttributeError(a)
# And create an instance.
constants = Constants()
# A helpers for DispatchWithEvents - this becomes __setattr__ for the
# temporary class.
def _event_setattr_(self, attr, val):
try:
# Does the COM object have an attribute of this name?
self.__class__.__bases__[0].__setattr__(self, attr, val)
except AttributeError:
# Otherwise just stash it away in the instance.
self.__dict__[attr] = val
# An instance of this "proxy" is created to break the COM circular references
# that exist (ie, when we connect to the COM events, COM keeps a reference
# to the object. Thus, the Event connection must be manually broken before
# our object can die. This solves the problem by manually breaking the connection
# to the real object as the proxy dies.
class EventsProxy:
def __init__(self, ob):
self.__dict__['_obj_'] = ob
def __del__(self):
try:
# If there is a COM error on disconnection we should
# just ignore it - object probably already shut down...
self._obj_.close()
except pythoncom.com_error:
pass
def __getattr__(self, attr):
return getattr(self._obj_, attr)
def __setattr__(self, attr, val):
setattr(self._obj_, attr, val)
def DispatchWithEvents(clsid, user_event_class):
"""Create a COM object that can fire events to a user defined class.
clsid -- The ProgID or CLSID of the object to create.
user_event_class -- A Python class object that responds to the events.
This requires makepy support for the COM object being created. If
this support does not exist it will be automatically generated by
this function. If the object does not support makepy, a TypeError
exception will be raised.
The result is a class instance that both represents the COM object
and handles events from the COM object.
It is important to note that the returned instance is not a direct
instance of the user_event_class, but an instance of a temporary
class object that derives from three classes:
* The makepy generated class for the COM object
* The makepy generated class for the COM events
* The user_event_class as passed to this function.
If this is not suitable, see the getevents function for an alternative
technique of handling events.
Object Lifetimes: Whenever the object returned from this function is
cleaned-up by Python, the events will be disconnected from
the COM object. This is almost always what should happen,
but see the documentation for getevents() for more details.
Example:
>>> class IEEvents:
... def OnVisible(self, visible):
... print "Visible changed:", visible
...
>>> ie = DispatchWithEvents("InternetExplorer.Application", IEEvents)
>>> ie.Visible = 1
Visible changed: 1
>>>
"""
# Create/Get the object.
disp = Dispatch(clsid)
if not disp.__class__.__dict__.get("CLSID"): # Eeek - no makepy support - try and build it.
try:
ti = disp._oleobj_.GetTypeInfo()
disp_clsid = ti.GetTypeAttr()[0]
tlb, index = ti.GetContainingTypeLib()
tla = tlb.GetLibAttr()
gencache.EnsureModule(tla[0], tla[1], tla[3], tla[4], bValidateFile=0)
# Get the class from the module.
disp_class = gencache.GetClassForProgID(str(disp_clsid))
except pythoncom.com_error:
raise TypeError("This COM object can not automate the makepy process - please run makepy manually for this object")
else:
disp_class = disp.__class__
# If the clsid was an object, get the clsid
clsid = disp_class.CLSID
# Create a new class that derives from 3 classes - the dispatch class, the event sink class and the user class.
    # XXX - we are still "classic style" classes in py2x, so we can't yet
# use 'type()' everywhere - revisit soon, as py2x will move to new-style too...
try:
from types import ClassType as new_type
except ImportError:
new_type = type # py3k
events_class = getevents(clsid)
if events_class is None:
raise ValueError("This COM object does not support events.")
result_class = new_type("COMEventClass", (disp_class, events_class, user_event_class), {"__setattr__" : _event_setattr_})
instance = result_class(disp._oleobj_) # This only calls the first base class __init__.
events_class.__init__(instance, instance)
if hasattr(user_event_class, "__init__"):
user_event_class.__init__(instance)
return EventsProxy(instance)
def WithEvents(disp, user_event_class):
"""Similar to DispatchWithEvents - except that the returned
object is *not* also usable as the original Dispatch object - that is
the returned object is not dispatchable.
The difference is best summarised by example.
>>> class IEEvents:
... def OnVisible(self, visible):
... print "Visible changed:", visible
...
>>> ie = Dispatch("InternetExplorer.Application")
>>> ie_events = WithEvents(ie, IEEvents)
>>> ie.Visible = 1
Visible changed: 1
Compare with the code sample for DispatchWithEvents, where you get a
single object that is both the interface and the event handler. Note that
the event handler instance will *not* be able to use 'self.' to refer to
IE's methods and properties.
This is mainly useful where using DispatchWithEvents causes
circular reference problems that the simple proxy doesn't deal with
"""
disp = Dispatch(disp)
if not disp.__class__.__dict__.get("CLSID"): # Eeek - no makepy support - try and build it.
try:
ti = disp._oleobj_.GetTypeInfo()
disp_clsid = ti.GetTypeAttr()[0]
tlb, index = ti.GetContainingTypeLib()
tla = tlb.GetLibAttr()
gencache.EnsureModule(tla[0], tla[1], tla[3], tla[4], bValidateFile=0)
# Get the class from the module.
disp_class = gencache.GetClassForProgID(str(disp_clsid))
except pythoncom.com_error:
raise TypeError("This COM object can not automate the makepy process - please run makepy manually for this object")
else:
disp_class = disp.__class__
# Get the clsid
clsid = disp_class.CLSID
# Create a new class that derives from 2 classes - the event sink
# class and the user class.
try:
from types import ClassType as new_type
except ImportError:
new_type = type # py3k
events_class = getevents(clsid)
if events_class is None:
raise ValueError("This COM object does not support events.")
result_class = new_type("COMEventClass", (events_class, user_event_class), {})
instance = result_class(disp) # This only calls the first base class __init__.
if hasattr(user_event_class, "__init__"):
user_event_class.__init__(instance)
return instance
def getevents(clsid):
"""Determine the default outgoing interface for a class, given
either a clsid or progid. It returns a class - you can
conveniently derive your own handler from this class and implement
the appropriate methods.
This method relies on the classes produced by makepy. You must use
either makepy or the gencache module to ensure that the
appropriate support classes have been generated for the com server
that you will be handling events from.
Beware of COM circular references. When the Events class is connected
to the COM object, the COM object itself keeps a reference to the Python
events class. Thus, neither the Events instance or the COM object will
ever die by themselves. The 'close' method on the events instance
must be called to break this chain and allow standard Python collection
rules to manage object lifetimes. Note that DispatchWithEvents() does
work around this problem by the use of a proxy object, but if you use
the getevents() function yourself, you must make your own arrangements
to manage this circular reference issue.
Beware of creating Python circular references: this will happen if your
handler has a reference to an object that has a reference back to
the event source. Call the 'close' method to break the chain.
Example:
>>>win32com.client.gencache.EnsureModule('{EAB22AC0-30C1-11CF-A7EB-0000C05BAE0B}',0,1,1)
<module 'win32com.gen_py.....
>>>
>>> class InternetExplorerEvents(win32com.client.getevents("InternetExplorer.Application.1")):
... def OnVisible(self, Visible):
... print "Visibility changed: ", Visible
...
>>>
>>> ie=win32com.client.Dispatch("InternetExplorer.Application.1")
>>> events=InternetExplorerEvents(ie)
>>> ie.Visible=1
Visibility changed: 1
>>>
"""
# find clsid given progid or clsid
clsid=str(pywintypes.IID(clsid))
# return default outgoing interface for that class
klass = gencache.GetClassForCLSID(clsid)
try:
return klass.default_source
except AttributeError:
# See if we have a coclass for the interfaces.
try:
return gencache.GetClassForCLSID(klass.coclass_clsid).default_source
except AttributeError:
return None
# A Record object, as used by the COM struct support
def Record(name, object):
"""Creates a new record object, given the name of the record,
and an object from the same type library.
Example usage would be:
app = win32com.client.Dispatch("Some.Application")
point = win32com.client.Record("SomeAppPoint", app)
point.x = 0
point.y = 0
app.MoveTo(point)
"""
# XXX - to do - probably should allow "object" to already be a module object.
from . import gencache
object = gencache.EnsureDispatch(object)
module = sys.modules[object.__class__.__module__]
# to allow us to work correctly with "demand generated" code,
# we must use the typelib CLSID to obtain the module
# (otherwise we get the sub-module for the object, which
# does not hold the records)
# thus, package may be module, or may be module's parent if demand generated.
package = gencache.GetModuleForTypelib(module.CLSID, module.LCID, module.MajorVersion, module.MinorVersion)
try:
struct_guid = package.RecordMap[name]
except KeyError:
raise ValueError("The structure '%s' is not defined in module '%s'" % (name, package))
return pythoncom.GetRecordFromGuids(module.CLSID, module.MajorVersion, module.MinorVersion, module.LCID, struct_guid)
############################################
# The base of all makepy generated classes
############################################
class DispatchBaseClass:
def __init__(self, oobj=None):
if oobj is None:
oobj = pythoncom.new(self.CLSID)
elif isinstance(oobj, DispatchBaseClass):
try:
oobj = oobj._oleobj_.QueryInterface(self.CLSID, pythoncom.IID_IDispatch) # Must be a valid COM instance
except pythoncom.com_error as details:
import winerror
# Some stupid objects fail here, even tho it is _already_ IDispatch!!??
# Eg, Lotus notes.
# So just let it use the existing object if E_NOINTERFACE
if details.hresult != winerror.E_NOINTERFACE:
raise
oobj = oobj._oleobj_
self.__dict__["_oleobj_"] = oobj # so we dont call __setattr__
# Provide a prettier name than the CLSID
def __repr__(self):
# Need to get the docstring for the module for this class.
try:
mod_doc = sys.modules[self.__class__.__module__].__doc__
if mod_doc:
mod_name = "win32com.gen_py." + mod_doc
else:
mod_name = sys.modules[self.__class__.__module__].__name__
except KeyError:
mod_name = "win32com.gen_py.unknown"
return "<%s.%s instance at 0x%s>" % (mod_name, self.__class__.__name__, id(self))
# Delegate comparison to the oleobjs, as they know how to do identity.
def __eq__(self, other):
other = getattr(other, "_oleobj_", other)
return self._oleobj_ == other
def __ne__(self, other):
other = getattr(other, "_oleobj_", other)
return self._oleobj_ != other
def _ApplyTypes_(self, dispid, wFlags, retType, argTypes, user, resultCLSID, *args):
return self._get_good_object_(
self._oleobj_.InvokeTypes(dispid, 0, wFlags, retType, argTypes, *args),
user, resultCLSID)
def __getattr__(self, attr):
args=self._prop_map_get_.get(attr)
if args is None:
raise AttributeError("'%s' object has no attribute '%s'" % (repr(self), attr))
return self._ApplyTypes_(*args)
def __setattr__(self, attr, value):
if attr in self.__dict__: self.__dict__[attr] = value; return
try:
args, defArgs=self._prop_map_put_[attr]
except KeyError:
raise AttributeError("'%s' object has no attribute '%s'" % (repr(self), attr))
self._oleobj_.Invoke(*(args + (value,) + defArgs))
def _get_good_single_object_(self, obj, obUserName=None, resultCLSID=None):
return _get_good_single_object_(obj, obUserName, resultCLSID)
def _get_good_object_(self, obj, obUserName=None, resultCLSID=None):
return _get_good_object_(obj, obUserName, resultCLSID)
# XXX - These should be consolidated with dynamic.py versions.
def _get_good_single_object_(obj, obUserName=None, resultCLSID=None):
if _PyIDispatchType==type(obj):
return Dispatch(obj, obUserName, resultCLSID)
return obj
def _get_good_object_(obj, obUserName=None, resultCLSID=None):
if obj is None:
return None
elif isinstance(obj, tuple):
obUserNameTuple = (obUserName,) * len(obj)
resultCLSIDTuple = (resultCLSID,) * len(obj)
return tuple(map(_get_good_object_, obj, obUserNameTuple, resultCLSIDTuple))
else:
return _get_good_single_object_(obj, obUserName, resultCLSID)
class CoClassBaseClass:
def __init__(self, oobj=None):
if oobj is None: oobj = pythoncom.new(self.CLSID)
self.__dict__["_dispobj_"] = self.default_interface(oobj)
def __repr__(self):
return "<win32com.gen_py.%s.%s>" % (__doc__, self.__class__.__name__)
def __getattr__(self, attr):
d=self.__dict__["_dispobj_"]
if d is not None: return getattr(d, attr)
raise AttributeError(attr)
def __setattr__(self, attr, value):
if attr in self.__dict__: self.__dict__[attr] = value; return
try:
d=self.__dict__["_dispobj_"]
if d is not None:
d.__setattr__(attr, value)
return
except AttributeError:
pass
self.__dict__[attr] = value
# A very simple VARIANT class. Only to be used with poorly-implemented COM
# objects. If an object accepts an arg which is a simple "VARIANT", but still
# is very picky about the actual variant type (eg, isn't happy with a VT_I4,
# which it would get from a Python integer), you can use this to force a
# particular VT.
class VARIANT(object):
def __init__(self, vt, value):
self.varianttype = vt
self._value = value
# 'value' is a property so when set by pythoncom it gets any magic wrapping
# which normally happens for result objects
def _get_value(self):
return self._value
def _set_value(self, newval):
self._value = _get_good_object_(newval)
def _del_value(self):
del self._value
value = property(_get_value, _set_value, _del_value)
def __repr__(self):
return "win32com.client.VARIANT(%r, %r)" % (self.varianttype, self._value)
| apache-2.0 | -1,377,030,094,880,480,300 | 40.305302 | 133 | 0.691954 | false |
arun6582/django | django/db/backends/utils.py | 24 | 7196 | import datetime
import decimal
import hashlib
import logging
import re
from time import time
from django.conf import settings
from django.utils.encoding import force_bytes
from django.utils.timezone import utc
logger = logging.getLogger('django.db.backends')
class CursorWrapper:
def __init__(self, cursor, db):
self.cursor = cursor
self.db = db
WRAP_ERROR_ATTRS = frozenset(['fetchone', 'fetchmany', 'fetchall', 'nextset'])
def __getattr__(self, attr):
cursor_attr = getattr(self.cursor, attr)
if attr in CursorWrapper.WRAP_ERROR_ATTRS:
return self.db.wrap_database_errors(cursor_attr)
else:
return cursor_attr
def __iter__(self):
with self.db.wrap_database_errors:
yield from self.cursor
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
# Close instead of passing through to avoid backend-specific behavior
# (#17671). Catch errors liberally because errors in cleanup code
# aren't useful.
try:
self.close()
except self.db.Database.Error:
pass
# The following methods cannot be implemented in __getattr__, because the
# code must run when the method is invoked, not just when it is accessed.
def callproc(self, procname, params=None):
self.db.validate_no_broken_transaction()
with self.db.wrap_database_errors:
if params is None:
return self.cursor.callproc(procname)
else:
return self.cursor.callproc(procname, params)
def execute(self, sql, params=None):
self.db.validate_no_broken_transaction()
with self.db.wrap_database_errors:
if params is None:
return self.cursor.execute(sql)
else:
return self.cursor.execute(sql, params)
def executemany(self, sql, param_list):
self.db.validate_no_broken_transaction()
with self.db.wrap_database_errors:
return self.cursor.executemany(sql, param_list)
class CursorDebugWrapper(CursorWrapper):
# XXX callproc isn't instrumented at this time.
def execute(self, sql, params=None):
start = time()
try:
return super().execute(sql, params)
finally:
stop = time()
duration = stop - start
sql = self.db.ops.last_executed_query(self.cursor, sql, params)
self.db.queries_log.append({
'sql': sql,
'time': "%.3f" % duration,
})
logger.debug(
'(%.3f) %s; args=%s', duration, sql, params,
extra={'duration': duration, 'sql': sql, 'params': params}
)
def executemany(self, sql, param_list):
start = time()
try:
return super().executemany(sql, param_list)
finally:
stop = time()
duration = stop - start
try:
times = len(param_list)
except TypeError: # param_list could be an iterator
times = '?'
self.db.queries_log.append({
'sql': '%s times: %s' % (times, sql),
'time': "%.3f" % duration,
})
logger.debug(
'(%.3f) %s; args=%s', duration, sql, param_list,
extra={'duration': duration, 'sql': sql, 'params': param_list}
)
###############################################
# Converters from database (string) to Python #
###############################################
def typecast_date(s):
return datetime.date(*map(int, s.split('-'))) if s else None # return None if s is null
def typecast_time(s): # does NOT store time zone information
if not s:
return None
hour, minutes, seconds = s.split(':')
if '.' in seconds: # check whether seconds have a fractional part
seconds, microseconds = seconds.split('.')
else:
microseconds = '0'
return datetime.time(int(hour), int(minutes), int(seconds), int((microseconds + '000000')[:6]))
def typecast_timestamp(s): # does NOT store time zone information
# "2005-07-29 15:48:00.590358-05"
# "2005-07-29 09:56:00-05"
if not s:
return None
if ' ' not in s:
return typecast_date(s)
d, t = s.split()
# Extract timezone information, if it exists. Currently it's ignored.
if '-' in t:
t, tz = t.split('-', 1)
tz = '-' + tz
elif '+' in t:
t, tz = t.split('+', 1)
tz = '+' + tz
else:
tz = ''
dates = d.split('-')
times = t.split(':')
seconds = times[2]
if '.' in seconds: # check whether seconds have a fractional part
seconds, microseconds = seconds.split('.')
else:
microseconds = '0'
tzinfo = utc if settings.USE_TZ else None
return datetime.datetime(
int(dates[0]), int(dates[1]), int(dates[2]),
int(times[0]), int(times[1]), int(seconds),
int((microseconds + '000000')[:6]), tzinfo
)
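# Illustrative result (with settings.USE_TZ False, so tzinfo is None):
#   typecast_timestamp('2005-07-29 15:48:00.590358-05')
#   -> datetime.datetime(2005, 7, 29, 15, 48, 0, 590358)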
def typecast_decimal(s):
if s is None or s == '':
return None
return decimal.Decimal(s)
###############################################
# Converters from Python to database (string) #
###############################################
def rev_typecast_decimal(d):
if d is None:
return None
return str(d)
def truncate_name(name, length=None, hash_len=4):
"""
Shorten a string to a repeatable mangled version with the given length.
If a quote stripped name contains a username, e.g. USERNAME"."TABLE,
truncate the table portion only.
"""
match = re.match(r'([^"]+)"\."([^"]+)', name)
table_name = match.group(2) if match else name
if length is None or len(table_name) <= length:
return name
hsh = hashlib.md5(force_bytes(table_name)).hexdigest()[:hash_len]
return '%s%s%s' % (match.group(1) + '"."' if match else '', table_name[:length - hash_len], hsh)
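# Illustrative behaviour (the 4-character suffix is the start of the md5 hash of
# the full table name; 'XXXX' below is a placeholder for that hash):
#   truncate_name('some_fairly_long_table_name', length=12) -> 'some_faiXXXX'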
def format_number(value, max_digits, decimal_places):
"""
Format a number into a string with the requisite number of digits and
decimal places.
"""
if value is None:
return None
if isinstance(value, decimal.Decimal):
context = decimal.getcontext().copy()
if max_digits is not None:
context.prec = max_digits
if decimal_places is not None:
value = value.quantize(decimal.Decimal(".1") ** decimal_places, context=context)
else:
context.traps[decimal.Rounded] = 1
value = context.create_decimal(value)
return "{:f}".format(value)
if decimal_places is not None:
return "%.*f" % (decimal_places, value)
return "{:f}".format(value)
def strip_quotes(table_name):
"""
Strip quotes off of quoted table names to make them safe for use in index
names, sequence names, etc. For example '"USER"."TABLE"' (an Oracle naming
scheme) becomes 'USER"."TABLE'.
"""
has_quotes = table_name.startswith('"') and table_name.endswith('"')
return table_name[1:-1] if has_quotes else table_name
| bsd-3-clause | -3,647,598,684,043,985,400 | 31.125 | 100 | 0.568232 | false |
bdfoster/blumate | tests/components/binary_sensor/test_template.py | 1 | 4116 | """The tests for the Template Binary sensor platform."""
import unittest
from unittest import mock
from blumate.const import EVENT_STATE_CHANGED, MATCH_ALL
from blumate.components.binary_sensor import template
from blumate.exceptions import TemplateError
from tests.common import get_test_home_assistant
class TestBinarySensorTemplate(unittest.TestCase):
"""Test for Binary sensor template platform."""
@mock.patch.object(template, 'BinarySensorTemplate')
def test_setup(self, mock_template):
""""Test the setup."""
config = {
'sensors': {
'test': {
'friendly_name': 'virtual thingy',
'value_template': '{{ foo }}',
'sensor_class': 'motion',
},
}
}
hass = mock.MagicMock()
add_devices = mock.MagicMock()
result = template.setup_platform(hass, config, add_devices)
self.assertTrue(result)
mock_template.assert_called_once_with(hass, 'test', 'virtual thingy',
'motion', '{{ foo }}', MATCH_ALL)
add_devices.assert_called_once_with([mock_template.return_value])
def test_setup_no_sensors(self):
""""Test setup with no sensors."""
config = {}
result = template.setup_platform(None, config, None)
self.assertFalse(result)
def test_setup_invalid_device(self):
""""Test the setup with invalid devices."""
config = {
'sensors': {
'foo bar': {},
},
}
result = template.setup_platform(None, config, None)
self.assertFalse(result)
def test_setup_invalid_sensor_class(self):
""""Test setup with invalid sensor class."""
config = {
'sensors': {
'test': {
'value_template': '{{ foo }}',
'sensor_class': 'foobarnotreal',
},
},
}
result = template.setup_platform(None, config, None)
self.assertFalse(result)
def test_setup_invalid_missing_template(self):
""""Test setup with invalid and missing template."""
config = {
'sensors': {
'test': {
'sensor_class': 'motion',
},
},
}
result = template.setup_platform(None, config, None)
self.assertFalse(result)
def test_attributes(self):
""""Test the attributes."""
hass = mock.MagicMock()
vs = template.BinarySensorTemplate(hass, 'parent', 'Parent',
'motion', '{{ 1 > 1 }}', MATCH_ALL)
self.assertFalse(vs.should_poll)
self.assertEqual('motion', vs.sensor_class)
self.assertEqual('Parent', vs.name)
vs.update()
self.assertFalse(vs.is_on)
vs._template = "{{ 2 > 1 }}"
vs.update()
self.assertTrue(vs.is_on)
def test_event(self):
""""Test the event."""
hass = get_test_home_assistant()
vs = template.BinarySensorTemplate(hass, 'parent', 'Parent',
'motion', '{{ 1 > 1 }}', MATCH_ALL)
vs.update_ha_state()
hass.pool.block_till_done()
with mock.patch.object(vs, 'update') as mock_update:
hass.bus.fire(EVENT_STATE_CHANGED)
hass.pool.block_till_done()
try:
assert mock_update.call_count == 1
finally:
hass.stop()
@mock.patch('blumate.helpers.template.render')
def test_update_template_error(self, mock_render):
""""Test the template update error."""
hass = mock.MagicMock()
vs = template.BinarySensorTemplate(hass, 'parent', 'Parent',
'motion', '{{ 1 > 1 }}', MATCH_ALL)
mock_render.side_effect = TemplateError('foo')
vs.update()
mock_render.side_effect = TemplateError(
"UndefinedError: 'None' has no attribute")
vs.update()
| mit | 8,647,993,542,165,457,000 | 33.881356 | 79 | 0.531341 | false |
Eksmo/calibre | src/calibre/gui2/preferences/plugins.py | 1 | 17420 | #!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <[email protected]>'
__docformat__ = 'restructuredtext en'
import textwrap, os
from collections import OrderedDict
from PyQt4.Qt import (Qt, QModelIndex, QAbstractItemModel, QVariant, QIcon,
QBrush)
from calibre.gui2.preferences import ConfigWidgetBase, test_widget
from calibre.gui2.preferences.plugins_ui import Ui_Form
from calibre.customize.ui import (initialized_plugins, is_disabled, enable_plugin,
disable_plugin, plugin_customization, add_plugin,
remove_plugin, NameConflict)
from calibre.gui2 import (NONE, error_dialog, info_dialog, choose_files,
question_dialog, gprefs)
from calibre.utils.search_query_parser import SearchQueryParser
from calibre.utils.icu import lower
from calibre.constants import iswindows
class PluginModel(QAbstractItemModel, SearchQueryParser): # {{{
def __init__(self, show_only_user_plugins=False):
QAbstractItemModel.__init__(self)
SearchQueryParser.__init__(self, ['all'])
self.show_only_user_plugins = show_only_user_plugins
self.icon = QVariant(QIcon(I('plugins.png')))
p = QIcon(self.icon).pixmap(32, 32, QIcon.Disabled, QIcon.On)
self.disabled_icon = QVariant(QIcon(p))
self._p = p
self.populate()
def toggle_shown_plugins(self, show_only_user_plugins):
self.show_only_user_plugins = show_only_user_plugins
self.populate()
self.reset()
def populate(self):
self._data = {}
for plugin in initialized_plugins():
if (getattr(plugin, 'plugin_path', None) is None
and self.show_only_user_plugins):
continue
if plugin.type not in self._data:
self._data[plugin.type] = [plugin]
else:
self._data[plugin.type].append(plugin)
self.categories = sorted(self._data.keys())
for plugins in self._data.values():
plugins.sort(cmp=lambda x, y: cmp(x.name.lower(), y.name.lower()))
def universal_set(self):
ans = set([])
for c, category in enumerate(self.categories):
ans.add((c, -1))
for p, plugin in enumerate(self._data[category]):
ans.add((c, p))
return ans
def get_matches(self, location, query, candidates=None):
if candidates is None:
candidates = self.universal_set()
ans = set([])
if not query:
return ans
query = lower(query)
for c, p in candidates:
if p < 0:
if query in lower(self.categories[c]):
ans.add((c, p))
continue
else:
try:
plugin = self._data[self.categories[c]][p]
except:
continue
if query in lower(plugin.name) or query in lower(plugin.author) or \
query in lower(plugin.description):
ans.add((c, p))
return ans
def find(self, query):
query = query.strip()
if not query:
return QModelIndex()
matches = self.parse(query)
if not matches:
return QModelIndex()
matches = list(sorted(matches))
c, p = matches[0]
cat_idx = self.index(c, 0, QModelIndex())
if p == -1:
return cat_idx
return self.index(p, 0, cat_idx)
def find_next(self, idx, query, backwards=False):
query = query.strip()
if not query:
return idx
matches = self.parse(query)
if not matches:
return idx
if idx.parent().isValid():
loc = (idx.parent().row(), idx.row())
else:
loc = (idx.row(), -1)
if loc not in matches:
return self.find(query)
if len(matches) == 1:
return QModelIndex()
matches = list(sorted(matches))
i = matches.index(loc)
if backwards:
ans = i - 1 if i - 1 >= 0 else len(matches)-1
else:
ans = i + 1 if i + 1 < len(matches) else 0
ans = matches[ans]
return self.index(ans[0], 0, QModelIndex()) if ans[1] < 0 else \
self.index(ans[1], 0, self.index(ans[0], 0, QModelIndex()))
def index(self, row, column, parent=QModelIndex()):
if not self.hasIndex(row, column, parent):
return QModelIndex()
if parent.isValid():
return self.createIndex(row, column, 1+parent.row())
else:
return self.createIndex(row, column, 0)
def parent(self, index):
if not index.isValid() or index.internalId() == 0:
return QModelIndex()
return self.createIndex(index.internalId()-1, 0, 0)
def rowCount(self, parent):
if not parent.isValid():
return len(self.categories)
if parent.internalId() == 0:
category = self.categories[parent.row()]
return len(self._data[category])
return 0
def columnCount(self, parent):
return 1
def index_to_plugin(self, index):
category = self.categories[index.parent().row()]
return self._data[category][index.row()]
def plugin_to_index(self, plugin):
for i, category in enumerate(self.categories):
parent = self.index(i, 0, QModelIndex())
for j, p in enumerate(self._data[category]):
if plugin == p:
return self.index(j, 0, parent)
return QModelIndex()
def plugin_to_index_by_properties(self, plugin):
for i, category in enumerate(self.categories):
parent = self.index(i, 0, QModelIndex())
for j, p in enumerate(self._data[category]):
if plugin.name == p.name and plugin.type == p.type and \
plugin.author == p.author and plugin.version == p.version:
return self.index(j, 0, parent)
return QModelIndex()
def refresh_plugin(self, plugin, rescan=False):
if rescan:
self.populate()
idx = self.plugin_to_index(plugin)
self.dataChanged.emit(idx, idx)
def flags(self, index):
if not index.isValid():
return 0
flags = Qt.ItemIsSelectable | Qt.ItemIsEnabled
return flags
def data(self, index, role):
if not index.isValid():
return NONE
if index.internalId() == 0:
if role == Qt.DisplayRole:
category = self.categories[index.row()]
return QVariant(_("%(plugin_type)s %(plugins)s")%\
dict(plugin_type=category, plugins=_('plugins')))
else:
plugin = self.index_to_plugin(index)
if role == Qt.DisplayRole:
ver = '.'.join(map(str, plugin.version))
desc = '\n'.join(textwrap.wrap(plugin.description, 100))
ans='%s (%s) %s %s\n%s'%(plugin.name, ver, _('by'), plugin.author, desc)
c = plugin_customization(plugin)
if c:
ans += _('\nCustomization: ')+c
return QVariant(ans)
if role == Qt.DecorationRole:
return self.disabled_icon if is_disabled(plugin) else self.icon
if role == Qt.ForegroundRole and is_disabled(plugin):
return QVariant(QBrush(Qt.gray))
if role == Qt.UserRole:
return plugin
return NONE
# }}}
class ConfigWidget(ConfigWidgetBase, Ui_Form):
supports_restoring_to_defaults = False
def genesis(self, gui):
self.gui = gui
self._plugin_model = PluginModel(self.user_installed_plugins.isChecked())
self.plugin_view.setModel(self._plugin_model)
self.plugin_view.setStyleSheet(
"QTreeView::item { padding-bottom: 10px;}")
self.plugin_view.doubleClicked.connect(self.double_clicked)
self.toggle_plugin_button.clicked.connect(self.toggle_plugin)
self.customize_plugin_button.clicked.connect(self.customize_plugin)
self.remove_plugin_button.clicked.connect(self.remove_plugin)
self.button_plugin_add.clicked.connect(self.add_plugin)
self.button_plugin_updates.clicked.connect(self.update_plugins)
self.button_plugin_new.clicked.connect(self.get_plugins)
self.search.initialize('plugin_search_history',
help_text=_('Search for plugin'))
self.search.search.connect(self.find)
self.next_button.clicked.connect(self.find_next)
self.previous_button.clicked.connect(self.find_previous)
self.changed_signal.connect(self.reload_store_plugins)
self.user_installed_plugins.stateChanged.connect(self.show_user_installed_plugins)
def show_user_installed_plugins(self, state):
self._plugin_model.toggle_shown_plugins(self.user_installed_plugins.isChecked())
def find(self, query):
idx = self._plugin_model.find(query)
if not idx.isValid():
return info_dialog(self, _('No matches'),
_('Could not find any matching plugins'), show=True,
show_copy_button=False)
self.highlight_index(idx)
def highlight_index(self, idx):
self.plugin_view.selectionModel().select(idx,
self.plugin_view.selectionModel().ClearAndSelect)
self.plugin_view.setCurrentIndex(idx)
self.plugin_view.setFocus(Qt.OtherFocusReason)
self.plugin_view.scrollTo(idx, self.plugin_view.EnsureVisible)
def find_next(self, *args):
idx = self.plugin_view.currentIndex()
if not idx.isValid():
idx = self._plugin_model.index(0, 0)
idx = self._plugin_model.find_next(idx,
unicode(self.search.currentText()))
self.highlight_index(idx)
def find_previous(self, *args):
idx = self.plugin_view.currentIndex()
if not idx.isValid():
idx = self._plugin_model.index(0, 0)
idx = self._plugin_model.find_next(idx,
unicode(self.search.currentText()), backwards=True)
self.highlight_index(idx)
def toggle_plugin(self, *args):
self.modify_plugin(op='toggle')
def double_clicked(self, index):
if index.parent().isValid():
self.modify_plugin(op='customize')
def customize_plugin(self, *args):
self.modify_plugin(op='customize')
def remove_plugin(self, *args):
self.modify_plugin(op='remove')
def add_plugin(self):
info = '' if iswindows else ' [.zip %s]'%_('files')
path = choose_files(self, 'add a plugin dialog', _('Add plugin'),
filters=[(_('Plugins') + info, ['zip'])], all_files=False,
select_only_single_file=True)
if not path:
return
path = path[0]
if path and os.access(path, os.R_OK) and path.lower().endswith('.zip'):
if not question_dialog(self, _('Are you sure?'), '<p>' + \
_('Installing plugins is a <b>security risk</b>. '
'Plugins can contain a virus/malware. '
'Only install it if you got it from a trusted source.'
' Are you sure you want to proceed?'),
show_copy_button=False):
return
try:
plugin = add_plugin(path)
except NameConflict as e:
return error_dialog(self, _('Already exists'),
unicode(e), show=True)
self._plugin_model.populate()
self._plugin_model.reset()
self.changed_signal.emit()
self.check_for_add_to_toolbars(plugin)
info_dialog(self, _('Success'),
_('Plugin <b>{0}</b> successfully installed under <b>'
' {1} plugins</b>. You may have to restart calibre '
'for the plugin to take effect.').format(plugin.name, plugin.type),
show=True, show_copy_button=False)
idx = self._plugin_model.plugin_to_index_by_properties(plugin)
if idx.isValid():
self.highlight_index(idx)
else:
error_dialog(self, _('No valid plugin path'),
_('%s is not a valid plugin path')%path).exec_()
def modify_plugin(self, op=''):
index = self.plugin_view.currentIndex()
if index.isValid():
if not index.parent().isValid():
name = unicode(index.data().toString())
return error_dialog(self, _('Error'), '<p>'+
_('Select an actual plugin under <b>%s</b> to customize')%name,
show=True, show_copy_button=False)
plugin = self._plugin_model.index_to_plugin(index)
if op == 'toggle':
if not plugin.can_be_disabled:
error_dialog(self,_('Plugin cannot be disabled'),
_('The plugin: %s cannot be disabled')%plugin.name).exec_()
return
if is_disabled(plugin):
enable_plugin(plugin)
else:
disable_plugin(plugin)
self._plugin_model.refresh_plugin(plugin)
self.changed_signal.emit()
if op == 'customize':
if not plugin.is_customizable():
info_dialog(self, _('Plugin not customizable'),
_('Plugin: %s does not need customization')%plugin.name).exec_()
return
self.changed_signal.emit()
from calibre.customize import InterfaceActionBase
if isinstance(plugin, InterfaceActionBase) and not getattr(plugin,
'actual_iaction_plugin_loaded', False):
return error_dialog(self, _('Must restart'),
_('You must restart calibre before you can'
' configure the <b>%s</b> plugin')%plugin.name, show=True)
if plugin.do_user_config(self.gui):
self._plugin_model.refresh_plugin(plugin)
elif op == 'remove':
msg = _('Plugin <b>{0}</b> successfully removed').format(plugin.name)
if remove_plugin(plugin):
self._plugin_model.populate()
self._plugin_model.reset()
self.changed_signal.emit()
info_dialog(self, _('Success'), msg, show=True,
show_copy_button=False)
else:
error_dialog(self, _('Cannot remove builtin plugin'),
plugin.name + _(' cannot be removed. It is a '
'builtin plugin. Try disabling it instead.')).exec_()
def get_plugins(self):
self.update_plugins(not_installed=True)
def update_plugins(self, not_installed=False):
from calibre.gui2.dialogs.plugin_updater import (PluginUpdaterDialog,
FILTER_UPDATE_AVAILABLE, FILTER_NOT_INSTALLED)
mode = FILTER_NOT_INSTALLED if not_installed else FILTER_UPDATE_AVAILABLE
d = PluginUpdaterDialog(self.gui, initial_filter=mode)
d.exec_()
self._plugin_model.populate()
self._plugin_model.reset()
self.changed_signal.emit()
def reload_store_plugins(self):
self.gui.load_store_plugins()
if self.gui.iactions.has_key('Store'):
self.gui.iactions['Store'].load_menu()
def check_for_add_to_toolbars(self, plugin):
from calibre.gui2.preferences.toolbar import ConfigWidget
from calibre.customize import InterfaceActionBase
if not isinstance(plugin, InterfaceActionBase):
return
all_locations = OrderedDict(ConfigWidget.LOCATIONS)
plugin_action = plugin.load_actual_plugin(self.gui)
installed_actions = OrderedDict([
(key, list(gprefs.get('action-layout-'+key, [])))
for key in all_locations])
# If already installed in a GUI container, do nothing
for action_names in installed_actions.itervalues():
if plugin_action.name in action_names:
return
allowed_locations = [(key, text) for key, text in
all_locations.iteritems() if key
not in plugin_action.dont_add_to]
if not allowed_locations:
return # This plugin doesn't want to live in the GUI
from calibre.gui2.dialogs.choose_plugin_toolbars import ChoosePluginToolbarsDialog
d = ChoosePluginToolbarsDialog(self, plugin_action, allowed_locations)
if d.exec_() == d.Accepted:
for key, text in d.selected_locations():
installed_actions = list(gprefs.get('action-layout-'+key, []))
installed_actions.append(plugin_action.name)
gprefs['action-layout-'+key] = tuple(installed_actions)
if __name__ == '__main__':
from PyQt4.Qt import QApplication
app = QApplication([])
test_widget('Advanced', 'Plugins')
| gpl-3.0 | -5,514,348,601,313,937,000 | 39.511628 | 92 | 0.564868 | false |
wdm0006/Flask-Blogging | test/test_sqlastorage.py | 1 | 13493 | try:
from builtins import range
except ImportError:
pass
import unittest
import tempfile
import os
from flask_blogging.sqlastorage import SQLAStorage
from sqlalchemy import create_engine
from test import FlaskBloggingTestCase
import sqlalchemy as sqla
import time
try:
import _mysql
HAS_MYSQL = True
except ImportError:
HAS_MYSQL = False
try:
import psycopg2
HAS_POSTGRES = True
except ImportError:
HAS_POSTGRES = False
class TestSQLiteStorage(FlaskBloggingTestCase):
def _create_storage(self):
temp_dir = tempfile.gettempdir()
self._dbfile = os.path.join(temp_dir, "temp.db")
self._engine = create_engine('sqlite:///'+self._dbfile)
self._meta = sqla.MetaData()
self.storage = SQLAStorage(self._engine, metadata=self._meta)
self._meta.create_all(bind=self._engine)
def setUp(self):
FlaskBloggingTestCase.setUp(self)
self._create_storage()
def tearDown(self):
os.remove(self._dbfile)
def test_post_table_exists(self):
table_name = "post"
with self._engine.begin() as conn:
self.assertTrue(conn.dialect.has_table(conn, table_name))
metadata = self._meta
table = metadata.tables[table_name]
columns = [t.name for t in table.columns]
expected_columns = ['id', 'title', 'text', 'post_date',
'last_modified_date', 'draft']
self.assertListEqual(columns, expected_columns)
def test_tag_table_exists(self):
table_name = "tag"
with self._engine.begin() as conn:
self.assertTrue(conn.dialect.has_table(conn, table_name))
metadata = self._meta
table = metadata.tables[table_name]
columns = [t.name for t in table.columns]
expected_columns = ['id', 'text']
self.assertListEqual(columns, expected_columns)
def test_tag_post_table_exists(self):
table_name = "tag_posts"
with self._engine.begin() as conn:
self.assertTrue(conn.dialect.has_table(conn, table_name))
metadata = self._meta
table = metadata.tables[table_name]
columns = [t.name for t in table.columns]
expected_columns = ['tag_id', 'post_id']
self.assertListEqual(columns, expected_columns)
def test_user_post_table_exists(self):
table_name = "user_posts"
with self._engine.begin() as conn:
self.assertTrue(conn.dialect.has_table(conn, table_name))
metadata = self._meta
table = metadata.tables[table_name]
columns = [t.name for t in table.columns]
expected_columns = ['user_id', 'post_id']
self.assertListEqual(columns, expected_columns)
def test_user_post_table_consistency(self):
# check if the user post table updates the user_id
user_id = 1
post_id = 5
pid = self.storage.save_post(title="Title", text="Sample Text",
user_id="user", tags=["hello", "world"])
posts = self.storage.get_posts()
self.assertEqual(len(posts), 1)
self.storage.save_post(title="Title", text="Sample Text",
user_id="newuser", tags=["hello", "world"],
post_id=pid)
self.assertEqual(len(posts), 1)
return
def test_tags_uniqueness(self):
table_name = "tag"
metadata = self._meta
table = metadata.tables[table_name]
with self._engine.begin() as conn:
statement = table.insert().values(text="test_tag")
conn.execute(statement)
# reentering same tag should raise exception
with self._engine.begin() as conn:
statement = table.insert().values(text="test_tag")
self.assertRaises(sqla.exc.IntegrityError, conn.execute, statement)
def test_tags_consistency(self):
# check that when tag is updated, the posts get updated
tags = ["hello", "world"]
pid = self.storage.save_post(title="Title", text="Sample Text",
user_id="user", tags=tags)
post = self.storage.get_post_by_id(pid)
self.assertEqual(len(post["tags"]), 2)
tags.pop()
pid = self.storage.save_post(title="Title", text="Sample Text",
user_id="user", tags=tags, post_id=pid)
post = self.storage.get_post_by_id(pid)
self.assertEqual(len(post["tags"]), 1)
def test_tag_post_uniqueness(self):
self.storage.save_post(title="Title", text="Sample Text",
user_id="user", tags=["tags"])
table_name = "tag_posts"
metadata = self._meta
table = metadata.tables[table_name]
with self._engine.begin() as conn:
statement = table.insert().values(tag_id=1, post_id=1)
self.assertRaises(sqla.exc.IntegrityError, conn.execute, statement)
def test_user_post_uniqueness(self):
pid = self.storage.save_post(title="Title1", text="Sample Text",
user_id="testuser",
tags=["hello", "world"])
table_name = "user_posts"
metadata = sqla.MetaData()
metadata.reflect(bind=self._engine)
table = metadata.tables[table_name]
# reentering same user should raise exception
with self._engine.begin() as conn:
statement = table.insert().values(user_id="testuser",
post_id=pid)
self.assertRaises(sqla.exc.IntegrityError, conn.execute, statement)
def test_bind_database(self):
# self.storage._create_all_tables()
self.test_post_table_exists()
self.test_tag_table_exists()
self.test_tag_post_table_exists()
self.test_user_post_table_exists()
def test_save_post(self):
pid = self.storage.save_post(title="Title1", text="Sample Text",
user_id="testuser",
tags=["hello", "world"])
pid = self.storage.save_post(title="Title1", text="Sample Text",
user_id="testuser",
tags=["hello", "world"], post_id=1)
p = self.storage.get_post_by_id(2)
self.assertIsNone(p)
# invalid post_id will be treated as inserts
pid = self.storage.save_post(title="Title1", text="Sample Text",
user_id="testuser",
tags=["hello", "world"],
post_id=5)
self.assertNotEqual(pid, 5)
self.assertEqual(pid, 2)
p = self.storage.get_post_by_id(2)
self.assertIsNotNone(p)
def test_delete_post(self):
# insert, check exists, delete, check doesn't exist anymore
pid = self.storage.save_post(title="Title1", text="Sample Text",
user_id="testuser",
tags=["hello", "world"])
p = self.storage.get_post_by_id(pid)
self.assertIsNotNone(p)
self.storage.delete_post(pid)
p = self.storage.get_post_by_id(pid)
self.assertIsNone(p)
# insert again.
pid = self.storage.save_post(title="Title1", text="Sample Text",
user_id="testuser",
tags=["hello", "world"],
post_id=1)
p = self.storage.get_post_by_id(pid)
self.assertIsNotNone(p)
def test_get_post_by_id(self):
pid1 = self.storage.save_post(title="Title1", text="Sample Text1",
user_id="testuser",
tags=["hello", "world"])
pid2 = self.storage.save_post(title="Title2", text="Sample Text2",
user_id="testuser",
tags=["hello", "my", "world"])
post = self.storage.get_post_by_id(pid1)
self._assert_post(post, "Title1", "Sample Text1", "testuser",
["HELLO", "WORLD"])
post = self.storage.get_post_by_id(pid2)
self._assert_post(post, "Title2", "Sample Text2", "testuser",
["HELLO", "MY", "WORLD"])
def _assert_post(self, post, title, text, user_id, tags):
tags = set([t.upper() for t in tags])
self.assertSetEqual(set(post["tags"]), tags)
self.assertEqual(post["title"], title)
self.assertEqual(post["text"], text)
self.assertEqual(post["user_id"], user_id)
def test_get_posts(self):
self._create_dummy_data()
# test default queries
posts = self.storage.get_posts()
self.assertEqual(len(posts), 10)
ctr = 19
for post in posts:
self._assert_post(post, "Title%d" % ctr,
"Sample Text%d" % ctr, "newuser", ["world"])
ctr -= 1
posts = self.storage.get_posts(recent=False)
self.assertEqual(len(posts), 10)
ctr = 0
for post in posts:
self._assert_post(post, "Title%d" % ctr,
"Sample Text%d" % ctr, "testuser", ["hello"])
ctr += 1
# test count and offset
posts = self.storage.get_posts(count=5, offset=5, recent=False)
self.assertEqual(len(posts), 5)
ctr = 5
for post in posts:
self._assert_post(post, "Title%d" % ctr,
"Sample Text%d" % ctr, "testuser", ["hello"])
ctr += 1
# test tag feature
posts = self.storage.get_posts(tag="hello", recent=False)
self.assertEqual(len(posts), 10)
ctr = 0
for post in posts:
self._assert_post(post, "Title%d" % ctr,
"Sample Text%d" % ctr, "testuser", ["hello"])
ctr += 1
posts = self.storage.get_posts(tag="world", recent=False)
self.assertEqual(len(posts), 10)
ctr = 10
for post in posts:
self._assert_post(post, "Title%d" % ctr,
"Sample Text%d" % ctr, "newuser", ["world"])
ctr += 1
# test user_id feature
posts = self.storage.get_posts(user_id="newuser", recent=True)
self.assertEqual(len(posts), 10)
ctr = 19
for post in posts:
self._assert_post(post, "Title%d" % ctr,
"Sample Text%d" % ctr, "newuser", ["world"])
ctr -= 1
posts = self.storage.get_posts(user_id="testuser", recent=True)
self.assertEqual(len(posts), 10)
ctr = 9
for post in posts:
self._assert_post(post, "Title%d" % ctr,
"Sample Text%d" % ctr, "testuser", ["hello"])
ctr -= 1
return
def test_count_posts(self):
self._create_dummy_data()
count = self.storage.count_posts()
self.assertEqual(count, 20)
# test user
count = self.storage.count_posts(user_id="testuser")
self.assertEqual(count, 10)
count = self.storage.count_posts(user_id="newuser")
self.assertEqual(count, 10)
count = self.storage.count_posts(user_id="testuser")
self.assertEqual(count, 10)
# test tags
count = self.storage.count_posts(tag="hello")
self.assertEqual(count, 10)
count = self.storage.count_posts(tag="world")
self.assertEqual(count, 10)
# multiple queries
count = self.storage.count_posts(user_id="testuser", tag="world")
self.assertEqual(count, 0)
def _create_dummy_data(self):
for i in range(20):
tags = ["hello"] if i < 10 else ["world"]
user = "testuser" if i < 10 else "newuser"
self.storage.save_post(title="Title%d" % i,
text="Sample Text%d" % i,
user_id=user, tags=tags)
time.sleep(1)
@unittest.skipUnless(HAS_MYSQL, "Package mysql-python needs to be install to "
"run this test.")
class TestMySQLStorage(TestSQLiteStorage):
def _create_storage(self):
self._engine = create_engine(
"mysql+mysqldb://root:@localhost/flask_blogging")
self._meta = sqla.MetaData()
self.storage = SQLAStorage(self._engine, metadata=self._meta)
self._meta.create_all(bind=self._engine)
def tearDown(self):
metadata = sqla.MetaData()
metadata.reflect(bind=self._engine)
metadata.drop_all(bind=self._engine)
@unittest.skipUnless(HAS_POSTGRES, "Requires psycopg2 Postgres package")
class TestPostgresStorage(TestSQLiteStorage):
def _create_storage(self):
self._engine = create_engine(
"postgresql+psycopg2://postgres:@localhost/flask_blogging",
isolation_level="AUTOCOMMIT")
self._meta = sqla.MetaData()
self.storage = SQLAStorage(self._engine, metadata=self._meta)
self._meta.create_all(bind=self._engine)
def tearDown(self):
metadata = sqla.MetaData()
metadata.reflect(bind=self._engine)
metadata.drop_all(bind=self._engine)
| mit | 8,523,674,342,013,697,000 | 37.99711 | 79 | 0.547173 | false |
Abjad/abjad | abjad/indicators/StartTrillSpan.py | 1 | 7210 | import typing
from ..bundle import LilyPondFormatBundle
from ..overrides import TweakInterface
from ..pitch.intervals import NamedInterval
from ..pitch.pitches import NamedPitch
from ..storage import StorageFormatManager
class StartTrillSpan:
r"""
LilyPond ``\startTrillSpan`` command.
.. container:: example
>>> staff = abjad.Staff("c'4 d' e' f'")
>>> start_trill_span = abjad.StartTrillSpan()
>>> abjad.tweak(start_trill_span).color = "#blue"
>>> abjad.attach(start_trill_span, staff[0])
>>> stop_trill_span = abjad.StopTrillSpan()
>>> abjad.attach(stop_trill_span, staff[-1])
>>> abjad.show(staff) # doctest: +SKIP
.. docs::
>>> string = abjad.lilypond(staff)
>>> print(string)
\new Staff
{
c'4
- \tweak color #blue
\startTrillSpan
d'4
e'4
f'4
\stopTrillSpan
}
"""
### CLASS VARIABLES ###
__slots__ = ("_interval", "_pitch", "_tweaks")
_context = "Voice"
_parameter = "TRILL"
_persistent = True
### INITIALIZER ###
def __init__(
self,
*,
interval: typing.Union[str, NamedInterval] = None,
pitch: typing.Union[str, NamedPitch] = None,
tweaks: TweakInterface = None,
) -> None:
if interval is not None:
interval = NamedInterval(interval)
self._interval = interval
if pitch is not None:
pitch = NamedPitch(pitch)
self._pitch = pitch
if tweaks is not None:
assert isinstance(tweaks, TweakInterface), repr(tweaks)
self._tweaks = TweakInterface.set_tweaks(self, tweaks)
### SPECIAL METHODS ###
def __eq__(self, argument) -> bool:
"""
Is true when all initialization values of Abjad value object equal
the initialization values of ``argument``.
"""
return StorageFormatManager.compare_objects(self, argument)
def __hash__(self) -> int:
"""
Hashes Abjad value object.
"""
hash_values = StorageFormatManager(self).get_hash_values()
try:
result = hash(hash_values)
except TypeError:
raise TypeError(f"unhashable type: {self}")
return result
def __repr__(self) -> str:
"""
Gets interpreter representation.
"""
return StorageFormatManager(self).get_repr_format()
### PRIVATE METHODS ###
def _get_lilypond_format_bundle(self, component=None):
bundle = LilyPondFormatBundle()
if self.tweaks:
tweaks = self.tweaks._list_format_contributions()
bundle.after.spanner_starts.extend(tweaks)
string = r"\startTrillSpan"
if self.interval or self.pitch:
bundle.opening.spanners.append(r"\pitchedTrill")
if self.pitch:
pitch = self.pitch
else:
pitch = component.written_pitch + self.interval
string = string + f" {pitch!s}"
bundle.after.spanner_starts.append(string)
return bundle
### PUBLIC PROPERTIES ###
@property
def context(self) -> str:
"""
Returns (historically conventional) context ``'Voice'``.
.. container:: example
>>> abjad.StartTrillSpan().context
'Voice'
Class constant.
Override with ``abjad.attach(..., context='...')``.
"""
return self._context
@property
def interval(self) -> typing.Optional[NamedInterval]:
r"""
Gets interval.
.. container:: example
>>> staff = abjad.Staff("c'4 d' e' f'")
>>> start_trill_span = abjad.StartTrillSpan(interval='M2')
>>> abjad.tweak(start_trill_span).color = "#blue"
>>> abjad.attach(start_trill_span, staff[0])
>>> stop_trill_span = abjad.StopTrillSpan()
>>> abjad.attach(stop_trill_span, staff[-1])
>>> abjad.show(staff) # doctest: +SKIP
.. docs::
>>> string = abjad.lilypond(staff)
>>> print(string)
\new Staff
{
\pitchedTrill
c'4
- \tweak color #blue
\startTrillSpan d'
d'4
e'4
f'4
\stopTrillSpan
}
"""
return self._interval
@property
def parameter(self) -> str:
"""
Returns ``'TRILL'``.
.. container:: example
>>> abjad.StartTrillSpan().parameter
'TRILL'
Class constant.
"""
return self._parameter
@property
def persistent(self) -> bool:
"""
Is true.
.. container:: example
>>> abjad.StartTrillSpan().persistent
True
Class constant.
"""
return self._persistent
@property
def pitch(self) -> typing.Optional[NamedPitch]:
r"""
Gets pitch.
.. container:: example
>>> staff = abjad.Staff("c'4 d' e' f'")
>>> start_trill_span = abjad.StartTrillSpan(pitch='C#4')
>>> abjad.tweak(start_trill_span).color = "#blue"
>>> abjad.attach(start_trill_span, staff[0])
>>> stop_trill_span = abjad.StopTrillSpan()
>>> abjad.attach(stop_trill_span, staff[-1])
>>> abjad.show(staff) # doctest: +SKIP
.. docs::
>>> string = abjad.lilypond(staff)
>>> print(string)
\new Staff
{
\pitchedTrill
c'4
- \tweak color #blue
\startTrillSpan cs'
d'4
e'4
f'4
\stopTrillSpan
}
"""
return self._pitch
@property
def spanner_start(self) -> bool:
"""
Is true.
.. container:: example
>>> abjad.StartTrillSpan().spanner_start
True
"""
return True
@property
def tweaks(self) -> typing.Optional[TweakInterface]:
r"""
        Gets tweaks.
.. container:: example
REGRESSION. Tweaks survive copy:
>>> import copy
>>> start_trill_span = abjad.StartTrillSpan()
>>> abjad.tweak(start_trill_span).color = "#blue"
>>> string = abjad.storage(start_trill_span)
>>> print(string)
abjad.StartTrillSpan(
tweaks=TweakInterface(('_literal', None), ('color', '#blue')),
)
>>> start_trill_span_2 = copy.copy(start_trill_span)
>>> string = abjad.storage(start_trill_span_2)
>>> print(string)
abjad.StartTrillSpan(
tweaks=TweakInterface(('_literal', None), ('color', '#blue')),
)
"""
return self._tweaks
| gpl-3.0 | 3,452,362,261,971,732,000 | 26.003745 | 78 | 0.496533 | false |
klipstein/dojango | dojango/util/dojo_collector.py | 12 | 1196 | from threading import local
__all__ = ['activate', 'deactivate', 'get_collector', 'add_module']
_active = local()
def activate():
"""
    Activates a globally accessible object, where we can save information about
required dojo modules.
"""
class Collector:
used_dojo_modules = []
def add(self, module):
# just add a module once!
if not module in self.used_dojo_modules:
self.used_dojo_modules.append(module)
_active.value = Collector()
def deactivate():
"""
Resets the currently active global object
"""
if hasattr(_active, "value"):
del _active.value
def get_collector():
"""Returns the currently active collector object."""
t = getattr(_active, "value", None)
if t is not None:
try:
return t
except AttributeError:
return None
return None
def get_modules():
collector = get_collector()
if collector is not None:
return collector.used_dojo_modules
return []
def add_module(module):
collector = get_collector()
if collector is not None:
collector.add(module)
# otherwise do nothing
pass
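# Minimal usage sketch (the module name below is hypothetical): activate() installs a
# thread-local collector, add_module() records each required dojo module once, and
# get_modules() returns what was collected until deactivate() clears it again.
#
#   activate()
#   add_module("dijit.form.Button")
#   add_module("dijit.form.Button")   # duplicates are ignored
#   get_modules()                     # -> ["dijit.form.Button"]
#   deactivate()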
| bsd-3-clause | -1,829,347,207,035,283,500 | 22.45098 | 77 | 0.60786 | false |
terrameijar/oppia | core/jobs_registry.py | 2 | 3538 | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Job registries."""
from core.domain import activity_jobs_one_off
from core.domain import collection_jobs_one_off
from core.domain import exp_jobs_one_off
from core.domain import feedback_jobs_one_off
from core.domain import feedback_jobs_continuous
from core.domain import stats_jobs_continuous
from core.domain import stats_jobs_one_off
from core.domain import user_jobs_continuous
from core.domain import user_jobs_one_off
from core.domain import email_jobs_one_off
from core.domain import recommendations_jobs_one_off
# List of all manager classes for one-off batch jobs for which to show controls
# on the admin dashboard.
ONE_OFF_JOB_MANAGERS = [
activity_jobs_one_off.IndexAllActivitiesJobManager,
collection_jobs_one_off.CollectionMigrationJob,
email_jobs_one_off.EmailHashRegenerationOneOffJob,
exp_jobs_one_off.ExpSummariesContributorsOneOffJob,
exp_jobs_one_off.ExpSummariesCreationOneOffJob,
exp_jobs_one_off.ExplorationContributorsSummaryOneOffJob,
exp_jobs_one_off.ExplorationFirstPublishedOneOffJob,
exp_jobs_one_off.ExplorationMigrationJobManager,
exp_jobs_one_off.ExplorationValidityJobManager,
exp_jobs_one_off.ItemSelectionInteractionOneOffJob,
exp_jobs_one_off.ViewableExplorationsAuditJob,
feedback_jobs_one_off.FeedbackThreadMessagesCountOneOffJob,
recommendations_jobs_one_off.ExplorationRecommendationsOneOffJob,
stats_jobs_one_off.StatisticsAudit,
user_jobs_one_off.DashboardSubscriptionsOneOffJob,
user_jobs_one_off.LongUserBiosOneOffJob,
user_jobs_one_off.UserContributionsOneOffJob,
user_jobs_one_off.UserDefaultDashboardOneOffJob,
user_jobs_one_off.UserFirstContributionMsecOneOffJob,
user_jobs_one_off.UserLastExplorationActivityOneOffJob,
user_jobs_one_off.UserProfilePictureOneOffJob,
user_jobs_one_off.UsernameLengthDistributionOneOffJob,
]
# List of all ContinuousComputation managers to show controls for on the
# admin dashboard.
# NOTE TO DEVELOPERS: When a new ContinuousComputation manager is defined,
# it should be registered here.
ALL_CONTINUOUS_COMPUTATION_MANAGERS = [
feedback_jobs_continuous.FeedbackAnalyticsAggregator,
stats_jobs_continuous.InteractionAnswerSummariesAggregator,
stats_jobs_continuous.StatisticsAggregator,
user_jobs_continuous.DashboardRecentUpdatesAggregator,
user_jobs_continuous.UserStatsAggregator,
]
class ContinuousComputationEventDispatcher(object):
"""Dispatches events to the relevant ContinuousComputation classes."""
@classmethod
def dispatch_event(cls, event_type, *args, **kwargs):
"""Dispatches an incoming event to the ContinuousComputation
classes which listen to events of that type.
"""
for klass in ALL_CONTINUOUS_COMPUTATION_MANAGERS:
if event_type in klass.get_event_types_listened_to():
klass.on_incoming_event(event_type, *args, **kwargs)
| apache-2.0 | 2,811,917,874,789,958,700 | 42.679012 | 79 | 0.782928 | false |
spywhere/Terminality | tests/test_macro_parser.py | 1 | 14051 | import sublime
import unittest
from unittest.mock import patch, MagicMock
from Terminality.macro import Macro
def file_content(region):
contents = """
Hello, World!
This might be a long file
In which use to test something
Blah blah blah...
"""
return contents[region.begin():region.end()]
MockView = MagicMock(spec=sublime.View)
MockView.substr = MagicMock(side_effect=file_content)
MockView.file_name.return_value = "path/to/file.ext"
MockWindow = MagicMock(spec=sublime.Window)
MockWindow.active_view.return_value = MockView
MockWindow.folders.return_value = ["another/path/to/directory",
"path/to"]
class TestMacroParser(unittest.TestCase):
@patch('sublime.active_window', return_value=MockWindow)
def test_none(self, active_window):
macros = {
"test": None,
"expected": None,
"required": None,
"macros": None
}
self.assertEqual(
Macro.parse_macro(
string=macros["test"],
custom_macros=macros["macros"],
required=macros["required"]
),
macros["expected"]
)
@patch('sublime.active_window', return_value=MockWindow)
def test_empty(self, active_window):
macros = {
"test": "",
"expected": "",
"required": [],
"macros": {}
}
self.assertEqual(
Macro.parse_macro(
string=macros["test"],
custom_macros=macros["macros"],
required=macros["required"]
),
macros["expected"]
)
@patch('sublime.active_window', return_value=MockWindow)
def test_predefined_macro1(self, active_window):
macros = {
"test": "",
"expected": "",
"required": ["file", "file_name"],
"macros": {}
}
self.assertEqual(
Macro.parse_macro(
string=macros["test"],
custom_macros=macros["macros"],
required=macros["required"]
),
macros["expected"]
)
@patch('sublime.active_window', return_value=MockWindow)
def test_predefined_macro2(self, active_window):
macros = {
"test": "$file_name",
"expected": None,
"required": ["required", "file_name"],
"macros": {}
}
self.assertEqual(
Macro.parse_macro(
string=macros["test"],
custom_macros=macros["macros"],
required=macros["required"]
),
macros["expected"]
)
@patch('sublime.active_window', return_value=MockWindow)
def test_predefined_macro3(self, active_window):
macros = {
"test": "$require ; $file",
"expected": " ; path/to/file.ext",
"required": [],
"macros": {}
}
self.assertEqual(
Macro.parse_macro(
string=macros["test"],
custom_macros=macros["macros"],
required=macros["required"]
),
macros["expected"]
)
@patch('sublime.active_window', return_value=MockWindow)
def test_predefined_macro4(self, active_window):
macros = {
"test": "$parent$file$file_name",
"expected": "path/topath/to/file.extfile.ext",
"required": [],
"macros": {}
}
self.assertEqual(
Macro.parse_macro(
string=macros["test"],
custom_macros=macros["macros"],
required=macros["required"]
),
macros["expected"]
)
@patch('sublime.active_window', return_value=MockWindow)
def test_predefined_macro5(self, active_window):
macros = {
"test": "$working$$$working_project$$$project",
"expected": "path/to$path/to$another/path/to/directory",
"required": [],
"macros": {}
}
self.assertEqual(
Macro.parse_macro(
string=macros["test"],
custom_macros=macros["macros"],
required=macros["required"]
),
macros["expected"]
)
@patch('sublime.active_window', return_value=MockWindow)
def test_required_macro1(self, active_window):
macros = {
"test": "",
"expected": None,
"required": ["required"],
"macros": {}
}
self.assertEqual(
Macro.parse_macro(
string=macros["test"],
custom_macros=macros["macros"],
required=macros["required"]
),
macros["expected"]
)
@patch('sublime.active_window', return_value=MockWindow)
def test_required_macro2(self, active_window):
macros = {
"test": "",
"expected": None,
"required": ["required"],
"macros": {
"required": []
}
}
self.assertEqual(
Macro.parse_macro(
string=macros["test"],
custom_macros=macros["macros"],
required=macros["required"]
),
macros["expected"]
)
@patch('sublime.active_window', return_value=MockWindow)
def test_required_macro3(self, active_window):
macros = {
"test": "",
"expected": None,
"required": ["required"],
"macros": {
"required": [
1,
[1, 2],
None,
[None, None]
]
}
}
self.assertEqual(
Macro.parse_macro(
string=macros["test"],
custom_macros=macros["macros"],
required=macros["required"]
),
macros["expected"]
)
@patch('sublime.active_window', return_value=MockWindow)
def test_required_macro4(self, active_window):
macros = {
"test": "",
"expected": "",
"required": ["required"],
"macros": {
"required": [
1,
[1, 2],
None,
[None, None],
"macro_output"
]
}
}
self.assertEqual(
Macro.parse_macro(
string=macros["test"],
custom_macros=macros["macros"],
required=macros["required"]
),
macros["expected"]
)
@patch('sublime.active_window', return_value=MockWindow)
def test_required_macro5(self, active_window):
macros = {
"test": "$required",
"expected": "",
"required": [],
"macros": {
"required": [
1,
[1, 2],
None,
[None, None]
]
}
}
self.assertEqual(
Macro.parse_macro(
string=macros["test"],
custom_macros=macros["macros"],
required=macros["required"]
),
macros["expected"]
)
@patch('sublime.active_window', return_value=MockWindow)
def test_required_macro6(self, active_window):
macros = {
"test": "$selection",
"expected": "",
"required": [],
"macros": {}
}
self.assertEqual(
Macro.parse_macro(
string=macros["test"],
custom_macros=macros["macros"],
required=macros["required"]
),
macros["expected"]
)
@patch('sublime.active_window', return_value=MockWindow)
def test_required_macro7(self, active_window):
macros = {
"test": "$selection",
"expected": None,
"required": ["selection"],
"macros": {}
}
self.assertEqual(
Macro.parse_macro(
string=macros["test"],
custom_macros=macros["macros"],
required=macros["required"]
),
macros["expected"]
)
@patch('sublime.active_window', return_value=MockWindow)
def test_required_macro8(self, active_window):
MockView.sel.return_value = [sublime.Region(5, 10)]
macros = {
"test": "$selection",
"expected": "Hello",
"required": [],
"macros": {}
}
self.assertEqual(
Macro.parse_macro(
string=macros["test"],
custom_macros=macros["macros"],
required=macros["required"]
),
macros["expected"]
)
@patch('sublime.active_window', return_value=MockWindow)
def test_required_macro9(self, active_window):
MockView.sel.return_value = [sublime.Region(5, 10)]
macros = {
"test": "$selection",
"expected": "Hello",
"required": ["selection"],
"macros": {}
}
self.assertEqual(
Macro.parse_macro(
string=macros["test"],
custom_macros=macros["macros"],
required=macros["required"]
),
macros["expected"]
)
@patch('sublime.active_window', return_value=MockWindow)
def test_required_macro10(self, active_window):
macros = {
"test": "",
"expected": None,
"required": [""],
"macros": {}
}
self.assertEqual(
Macro.parse_macro(
string=macros["test"],
custom_macros=macros["macros"],
required=macros["required"]
),
macros["expected"]
)
@patch('sublime.active_window', return_value=MockWindow)
def test_recursion_macro(self, active_window):
macros = {
"test": "$required",
"expected": "",
"required": [],
"macros": {
"required": [
"$required"
]
}
}
self.assertEqual(
Macro.parse_macro(
string=macros["test"],
custom_macros=macros["macros"],
required=macros["required"]
),
macros["expected"]
)
@patch('sublime.active_window', return_value=MockWindow)
def test_recursion_macro2(self, active_window):
macros = {
"test": "$required",
"expected": "",
"required": [],
"macros": {
"required": [
"$required2"
],
"required2": [
"$required"
]
}
}
self.assertEqual(
Macro.parse_macro(
string=macros["test"],
custom_macros=macros["macros"],
required=macros["required"]
),
macros["expected"]
)
@patch('sublime.active_window', return_value=MockWindow)
def test_recursion_macro3(self, active_window):
macros = {
"test": "$required$required2",
"expected": "OutputOutput",
"required": [],
"macros": {
"required": [
"$required2",
"Output"
],
"required2": [
"$required"
]
}
}
self.assertEqual(
Macro.parse_macro(
string=macros["test"],
custom_macros=macros["macros"],
required=macros["required"]
),
macros["expected"]
)
@patch('sublime.active_window', return_value=MockWindow)
def test_substring_macro(self, active_window):
macros = {
"test": "$custom;$custom2;$custom3;$custom4",
"expected": ".ext;.ext;.ext;.ext",
"required": [],
"macros": {
"custom": [
"$file",
["-4:"]
],
"custom2": [
"$file_name",
["-4:"]
],
"custom3": [
["$file", "-4:"]
],
"custom4": [
["$file_name", "-4:"]
]
}
}
self.assertEqual(
Macro.parse_macro(
string=macros["test"],
custom_macros=macros["macros"],
required=macros["required"]
),
macros["expected"]
)
@patch('sublime.active_window', return_value=MockWindow)
def test_regex_macro(self, active_window):
macros = {
"test": "$custom;$custom2;$custom3;$custom4",
"expected": ".ext;.ext;.ext;.ext",
"required": [],
"macros": {
"custom": [
"$file",
["\\.\\w+$"]
],
"custom2": [
"$file_name",
["\\.\\w+$"]
],
"custom3": [
["$file", "\\.\\w+$"]
],
"custom4": [
["$file_name", "\\.\\w+$"]
]
}
}
self.assertEqual(
Macro.parse_macro(
string=macros["test"],
custom_macros=macros["macros"],
required=macros["required"]
),
macros["expected"]
)
| mit | -546,960,842,366,438,140 | 27.214859 | 68 | 0.436481 | false |
youprofit/scikit-image | skimage/io/tests/test_plugin_util.py | 35 | 2230 | from skimage.io._plugins.util import prepare_for_display, WindowManager
from skimage._shared._warnings import expected_warnings
from numpy.testing import *
import numpy as np
np.random.seed(0)
class TestPrepareForDisplay:
def test_basic(self):
with expected_warnings(['precision loss']):
prepare_for_display(np.random.rand(10, 10))
def test_dtype(self):
with expected_warnings(['precision loss']):
x = prepare_for_display(np.random.rand(10, 15))
assert x.dtype == np.dtype(np.uint8)
def test_grey(self):
with expected_warnings(['precision loss']):
tmp = np.arange(12, dtype=float).reshape((4, 3)) / 11
x = prepare_for_display(tmp)
assert_array_equal(x[..., 0], x[..., 2])
assert x[0, 0, 0] == 0
assert x[3, 2, 0] == 255
def test_colour(self):
with expected_warnings(['precision loss']):
prepare_for_display(np.random.rand(10, 10, 3))
def test_alpha(self):
with expected_warnings(['precision loss']):
prepare_for_display(np.random.rand(10, 10, 4))
@raises(ValueError)
def test_wrong_dimensionality(self):
with expected_warnings(['precision loss']):
prepare_for_display(np.random.rand(10, 10, 1, 1))
@raises(ValueError)
def test_wrong_depth(self):
with expected_warnings(['precision loss']):
prepare_for_display(np.random.rand(10, 10, 5))
class TestWindowManager:
callback_called = False
def setup(self):
self.wm = WindowManager()
self.wm.acquire('test')
def test_add_window(self):
self.wm.add_window('window1')
self.wm.remove_window('window1')
def callback(self):
self.callback_called = True
def test_callback(self):
self.wm.register_callback(self.callback)
self.wm.add_window('window')
self.wm.remove_window('window')
assert self.callback_called
def test_has_images(self):
assert not self.wm.has_windows()
self.wm.add_window('window')
assert self.wm.has_windows()
def teardown(self):
self.wm._release('test')
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause | 4,026,512,516,071,056,000 | 28.342105 | 71 | 0.612108 | false |
rghe/ansible | test/runner/lib/cloud/cs.py | 4 | 8841 | """CloudStack plugin for integration tests."""
from __future__ import absolute_import, print_function
import json
import os
import re
import time
from lib.cloud import (
CloudProvider,
CloudEnvironment,
)
from lib.util import (
find_executable,
ApplicationError,
display,
SubprocessError,
is_shippable,
)
from lib.http import (
HttpClient,
HttpError,
urlparse,
)
from lib.docker_util import (
docker_run,
docker_rm,
docker_inspect,
docker_pull,
docker_network_inspect,
get_docker_container_id,
)
try:
# noinspection PyPep8Naming
import ConfigParser as configparser
except ImportError:
# noinspection PyUnresolvedReferences
import configparser
class CsCloudProvider(CloudProvider):
"""CloudStack cloud provider plugin. Sets up cloud resources before delegation."""
DOCKER_SIMULATOR_NAME = 'cloudstack-sim'
def __init__(self, args):
"""
:type args: TestConfig
"""
super(CsCloudProvider, self).__init__(args, config_extension='.ini')
# The simulator must be pinned to a specific version to guarantee CI passes with the version used.
self.image = 'quay.io/ansible/cloudstack-test-container:1.2.0'
self.container_name = ''
self.endpoint = ''
self.host = ''
self.port = 0
def filter(self, targets, exclude):
"""Filter out the cloud tests when the necessary config and resources are not available.
:type targets: tuple[TestTarget]
:type exclude: list[str]
"""
if os.path.isfile(self.config_static_path):
return
docker = find_executable('docker', required=False)
if docker:
return
skip = 'cloud/%s/' % self.platform
skipped = [target.name for target in targets if skip in target.aliases]
if skipped:
exclude.append(skip)
display.warning('Excluding tests marked "%s" which require the "docker" command or config (see "%s"): %s'
% (skip.rstrip('/'), self.config_template_path, ', '.join(skipped)))
def setup(self):
"""Setup the cloud resource before delegation and register a cleanup callback."""
super(CsCloudProvider, self).setup()
if self._use_static_config():
self._setup_static()
else:
self._setup_dynamic()
def get_remote_ssh_options(self):
"""Get any additional options needed when delegating tests to a remote instance via SSH.
:rtype: list[str]
"""
if self.managed:
return ['-R', '8888:localhost:8888']
return []
def get_docker_run_options(self):
"""Get any additional options needed when delegating tests to a docker container.
:rtype: list[str]
"""
if self.managed:
return ['--link', self.DOCKER_SIMULATOR_NAME]
return []
def cleanup(self):
"""Clean up the cloud resource and any temporary configuration files after tests complete."""
if self.container_name:
if is_shippable():
docker_rm(self.args, self.container_name)
elif not self.args.explain:
display.notice('Remember to run `docker rm -f %s` when finished testing.' % self.container_name)
super(CsCloudProvider, self).cleanup()
def _setup_static(self):
"""Configure CloudStack tests for use with static configuration."""
parser = configparser.RawConfigParser()
parser.read(self.config_static_path)
self.endpoint = parser.get('cloudstack', 'endpoint')
parts = urlparse(self.endpoint)
self.host = parts.hostname
if not self.host:
raise ApplicationError('Could not determine host from endpoint: %s' % self.endpoint)
if parts.port:
self.port = parts.port
elif parts.scheme == 'http':
self.port = 80
elif parts.scheme == 'https':
self.port = 443
else:
raise ApplicationError('Could not determine port from endpoint: %s' % self.endpoint)
display.info('Read cs host "%s" and port %d from config: %s' % (self.host, self.port, self.config_static_path), verbosity=1)
self._wait_for_service()
def _setup_dynamic(self):
"""Create a CloudStack simulator using docker."""
config = self._read_config_template()
self.container_name = self.DOCKER_SIMULATOR_NAME
results = docker_inspect(self.args, self.container_name)
if results and not results[0]['State']['Running']:
docker_rm(self.args, self.container_name)
results = []
if results:
display.info('Using the existing CloudStack simulator docker container.', verbosity=1)
else:
display.info('Starting a new CloudStack simulator docker container.', verbosity=1)
docker_pull(self.args, self.image)
docker_run(self.args, self.image, ['-d', '-p', '8888:8888', '--name', self.container_name])
if not self.args.explain:
display.notice('The CloudStack simulator will probably be ready in 2 - 4 minutes.')
container_id = get_docker_container_id()
if container_id:
display.info('Running in docker container: %s' % container_id, verbosity=1)
self.host = self._get_simulator_address()
display.info('Found CloudStack simulator container address: %s' % self.host, verbosity=1)
else:
self.host = 'localhost'
self.port = 8888
self.endpoint = 'http://%s:%d' % (self.host, self.port)
self._wait_for_service()
if self.args.explain:
values = dict(
HOST=self.host,
PORT=str(self.port),
)
else:
credentials = self._get_credentials()
if self.args.docker:
host = self.DOCKER_SIMULATOR_NAME
else:
host = self.host
values = dict(
HOST=host,
PORT=str(self.port),
KEY=credentials['apikey'],
SECRET=credentials['secretkey'],
)
config = self._populate_config_template(config, values)
self._write_config(config)
def _get_simulator_address(self):
networks = docker_network_inspect(self.args, 'bridge')
try:
bridge = [network for network in networks if network['Name'] == 'bridge'][0]
containers = bridge['Containers']
container = [containers[container] for container in containers if containers[container]['Name'] == self.DOCKER_SIMULATOR_NAME][0]
return re.sub(r'/[0-9]+$', '', container['IPv4Address'])
except:
display.error('Failed to process the following docker network inspect output:\n%s' %
json.dumps(networks, indent=4, sort_keys=True))
raise
def _wait_for_service(self):
"""Wait for the CloudStack service endpoint to accept connections."""
if self.args.explain:
return
client = HttpClient(self.args, always=True)
endpoint = self.endpoint
for _ in range(1, 30):
display.info('Waiting for CloudStack service: %s' % endpoint, verbosity=1)
try:
client.get(endpoint)
return
except SubprocessError:
pass
time.sleep(10)
raise ApplicationError('Timeout waiting for CloudStack service.')
def _get_credentials(self):
"""Wait for the CloudStack simulator to return credentials.
:rtype: dict[str, str]
"""
client = HttpClient(self.args, always=True)
endpoint = '%s/admin.json' % self.endpoint
for _ in range(1, 30):
display.info('Waiting for CloudStack credentials: %s' % endpoint, verbosity=1)
response = client.get(endpoint)
if response.status_code == 200:
try:
return response.json()
except HttpError as ex:
display.error(ex)
time.sleep(10)
raise ApplicationError('Timeout waiting for CloudStack credentials.')
class CsCloudEnvironment(CloudEnvironment):
"""CloudStack cloud environment plugin. Updates integration test environment after delegation."""
def configure_environment(self, env, cmd):
"""
:type env: dict[str, str]
:type cmd: list[str]
"""
changes = dict(
CLOUDSTACK_CONFIG=self.config_path,
)
env.update(changes)
cmd.append('-e')
cmd.append('cs_resource_prefix=%s' % self.resource_prefix)
| gpl-3.0 | 7,607,066,938,655,173,000 | 30.916968 | 141 | 0.592354 | false |
SHTOOLS/SHTOOLS | pyshtools/shio/yilm_index_vector.py | 2 | 1502 | def YilmIndexVector(i, l, m):
"""
Compute the index of a 1D array of spherical harmonic coefficients
corresponding to i, l, and m.
Usage
-----
index = YilmIndexVector (i, l, m)
Returns
-------
index : integer
Index of a 1D array of spherical harmonic coefficients corresponding
to i, l, and m.
Parameters
----------
i : integer
1 corresponds to the cosine coefficient Ylm = cilm[0,:,:], and 2
corresponds to the sine coefficient Yl,-m = cilm[1,:,:].
l : integer
The spherical harmonic degree.
m : integer
        The angular order, which must be greater than or equal to zero.
Notes
-----
YilmIndexVector will calculate the index of a 1D vector of spherical
harmonic coefficients corresponding to degree l, (positive) angular order
m and i (1 = cosine, 2 = sine). The index is given by l**2+(i-1)*l+m.
"""
if l < 0:
raise ValueError('The spherical harmonic degree must be positive. '
'Input value is {:s}'.format(repr(l)))
if m < 0:
raise ValueError('The angular order must be positive. '
'Input value is {:s}'.format(repr(m)))
    if m > l:
raise ValueError('The angular order must be less than or equal to '
'the spherical harmonic degree. Input degree is {:s}.'
' Input order is {:s}.'.format(repr(l), repr(m)))
return l**2 + (i - 1) * l + m
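# Worked example of the index formula above (a sketch, not part of the API): the
# cosine coefficient (i=1) of degree l=2 and order m=1 maps to index
# 2**2 + (1 - 1) * 2 + 1 = 5, i.e. YilmIndexVector(1, 2, 1) returns 5; the sine
# coefficient (i=2) of l=3, m=2 maps to 3**2 + 1 * 3 + 2 = 14.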
| bsd-3-clause | 7,922,310,537,229,932,000 | 33.930233 | 79 | 0.573236 | false |
ksachs/invenio | modules/websubmit/lib/functions/CaseEDS.py | 35 | 4620 | ## This file is part of Invenio.
## Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""This is the CaseEDS module. Contains the CaseEDS WebSubmit function.
"""
__revision__ = "$Id$"
import os
from invenio.websubmit_config import \
InvenioWebSubmitFunctionStop, \
InvenioWebSubmitFunctionError
def CaseEDS(parameters, curdir, form, user_info=None):
"""
This function compares the content of a file to different values and
directly goes to a different step in the action according to the value.
This function may be used if the treatment to be done after a
submission depends on a field entered by the user. Typically
this is used in an approval interface. If the referee approves
    then we do this. If he rejects, then we do something else. More
specifically, the function gets the value from the file named
[casevariable] and compares it with the values stored in
[casevalues]. If a value matches, the function directly goes to
the corresponding step stored in [casesteps]. If no value is
matched, it goes to step [casedefault].
@param parameters: (dictionary) of parameters (relating to the given
doctype/action) that are to be passed to the function:
        + casevariable: This parameter contains the name of the
file in which the function will get the
chosen value.
Eg:"decision"
+ casevalues: Contains the list of recognized values to
match with the chosen value. Should be a
comma separated list of words.
Eg:"approve,reject"
+ casesteps: Contains the list of steps corresponding to
the values matched in [casevalue]. It should
be a comma separated list of numbers.
Eg:"2,3"
In this example, if the value stored in the
                     file named "decision" is "approve", then the
function launches step 2 of this action. If it
is "reject", then step 3 is launched.
+ casedefault: Contains the step number to go by default if
no match is found.
Eg:"4"
In this example, if the value stored in the
                       file named "decision" is neither "approve" nor
"reject", then step 4 is launched.
@return: (string) - empty string
"""
## Get the values of the parameters passed to this function via the
## parameters array:
casevariable = parameters['casevariable']
casevalue = parameters['casevalues']
casestep = parameters['casesteps']
casedefault = parameters['casedefault']
casevalues = casevalue.split(",")
casesteps = casestep.split(",")
cases = {}
for a, b in map(None, casevalues, casesteps):
cases[a] = b
nextstep = ""
if not os.path.exists("%s/%s" % (curdir, casevariable)):
nextstep = casedefault
else:
fp = open("%s/%s" % (curdir, casevariable), "r")
value = fp.read()
fp.close()
if cases.has_key(value):
nextstep = cases[value]
else:
nextstep = casedefault
if nextstep != "":
t = "<b>Please wait...</b>"
t = """
<SCRIPT LANGUAGE="JavaScript1.1">
document.forms[0].action="/submit";
document.forms[0].step.value=%s;
user_must_confirm_before_leaving_page = false;
document.forms[0].submit();
</SCRIPT>""" % nextstep
raise InvenioWebSubmitFunctionStop(t)
else:
raise InvenioWebSubmitFunctionError("Case function: Could not " \
"determine next action step")
return ""
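# Illustrative [parameters] dictionary matching the docstring example above (values
# are examples only; they are configured per doctype/action in WebSubmit, not here):
#   parameters = {'casevariable': 'decision',
#                 'casevalues': 'approve,reject',
#                 'casesteps': '2,3',
#                 'casedefault': '4'}
# With these settings, a curdir file named "decision" containing "approve" jumps to
# step 2, "reject" jumps to step 3, and anything else falls back to step 4.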
| gpl-2.0 | 1,106,610,747,090,644,100 | 39.526316 | 78 | 0.610606 | false |
mithro/HDMI2USB-litex-firmware | targets/atlys/hdmi2usb.py | 1 | 2231 | from migen.fhdl.decorators import ClockDomainsRenamer
from litex.soc.integration.soc_core import mem_decoder
from litex.soc.interconnect import stream
from gateware.encoder import EncoderDMAReader, EncoderBuffer, Encoder
from gateware.streamer import USBStreamer
from targets.utils import csr_map_update
from targets.atlys.video import SoC as BaseSoC
class HDMI2USBSoC(BaseSoC):
csr_peripherals = (
"encoder_reader",
"encoder",
)
csr_map_update(BaseSoC.csr_map, csr_peripherals)
mem_map = {
"encoder": 0x50000000, # (shadow @0xd0000000)
}
mem_map.update(BaseSoC.mem_map)
def __init__(self, platform, *args, **kwargs):
BaseSoC.__init__(self, platform, *args, **kwargs)
encoder_port = self.sdram.crossbar.get_port(
mode="read",
data_width=128,
reverse=True,
)
self.submodules.encoder_reader = EncoderDMAReader(encoder_port)
encoder_cdc = stream.AsyncFIFO([("data", 128)], 4)
encoder_cdc = ClockDomainsRenamer({"write": "sys",
"read": "encoder"})(encoder_cdc)
encoder_buffer = ClockDomainsRenamer("encoder")(EncoderBuffer())
encoder = Encoder(platform)
encoder_streamer = USBStreamer(platform, platform.request("fx2"))
self.submodules += encoder_cdc, encoder_buffer, encoder, encoder_streamer
self.comb += [
self.encoder_reader.source.connect(encoder_cdc.sink),
encoder_cdc.source.connect(encoder_buffer.sink),
encoder_buffer.source.connect(encoder.sink),
encoder.source.connect(encoder_streamer.sink)
]
self.add_wb_slave(mem_decoder(self.mem_map["encoder"]), encoder.bus)
self.add_memory_region("encoder",
self.mem_map["encoder"] + self.shadow_base, 0x2000)
self.platform.add_period_constraint(encoder_streamer.cd_usb.clk, 10.0)
encoder_streamer.cd_usb.clk.attr.add("keep")
self.crg.cd_encoder.clk.attr.add("keep")
self.platform.add_false_path_constraints(
self.crg.cd_sys.clk,
self.crg.cd_encoder.clk,
encoder_streamer.cd_usb.clk)
SoC = HDMI2USBSoC
| bsd-2-clause | 2,761,800,389,207,639,000 | 36.183333 | 81 | 0.640072 | false |
Lingotek/client | tests/test_actions/test_download.py | 2 | 3533 | from tests.test_actions import *
from ltk.actions.download_action import *
from ltk.actions.clean_action import CleanAction
from ltk.actions.request_action import RequestAction
from ltk.actions.config_action import ConfigAction
from ltk.actions.pull_action import PullAction
from ltk.actions.rm_action import RmAction
from io import BytesIO
from io import StringIO
import sys
import unittest
class TestDownload(unittest.TestCase):
@classmethod
def setUpClass(cls):
create_config()
@classmethod
def tearDownClass(cls):
cleanup()
def setUp(self):
self.config_action = ConfigAction(os.getcwd())
self.config_action.config_action(clone_option='off')
self.config_action.config_action(download_folder='--none')
self.downloaded_files = []
self.locales = ['ja-JP', 'zh-CN']
self.action = DownloadAction(os.getcwd())
self.clean_action = CleanAction(os.getcwd())
self.request_action = RequestAction(os.getcwd(), None, None, self.locales, False, False, None, None)
self.pull_action = PullAction(os.getcwd(), self.action)
self.clean_action.clean_action(False, False, None)
self.files = ['sample.txt', 'sample1.txt']
self.first_doc = 'sample.txt'
for fn in self.files:
create_txt_file(fn)
os.system('ltk add sample*.txt -o') # Let the command line handle parsing the file pattern
self.doc_ids = self.action.doc_manager.get_doc_ids()
for doc_id in self.doc_ids:
assert poll_doc(self.action, doc_id)
self.request_action.target_action()
def tearDown(self):
self.rm_action = RmAction(os.getcwd())
for curr_file in self.files:
self.rm_action.rm_action([curr_file], remote=True, force=True)
self.clean_action.clean_action(False, False, None)
for dl_file in self.downloaded_files:
if os.path.exists(dl_file):
os.remove(dl_file)
self.rm_action.close()
self.clean_action.close()
self.action.close()
def get_dl_path(self, locale, document):
name_parts = document.split('.')
if len(name_parts) > 1:
name_parts.insert(-1, locale)
downloaded_name = '.'.join(part for part in name_parts)
else:
downloaded_name = name_parts[0] + '.' + locale
dl_path = os.path.join(self.action.path, downloaded_name)
return dl_path
def test_download_name(self):
self.action.download_by_path(self.first_doc, self.locales[0], False, False, False, False)
dl_file = self.get_dl_path(self.locales[0], self.first_doc)
assert self.locales[0] in dl_file
assert os.path.isfile(dl_file)
self.downloaded_files.append(dl_file)
def test_pull_all(self):
for document in self.files:
for locale in self.locales:
dl_file = self.get_dl_path(locale, document)
self.downloaded_files.append(dl_file)
self.pull_action.pull_translations(None, False, False, False)
for path in self.downloaded_files:
assert os.path.isfile(path)
def test_pull_locale(self):
for document in self.files:
dl_file = self.get_dl_path(self.locales[0], document)
self.downloaded_files.append(dl_file)
self.pull_action.pull_translations(self.locales[0], False, False, False)
for path in self.downloaded_files:
assert os.path.isfile(path)
| mit | -8,816,043,668,277,424,000 | 36.585106 | 108 | 0.636003 | false |
dbbhattacharya/kitsune | kitsune/sumo/middleware.py | 17 | 7876 | import contextlib
import re
import urllib
from django.conf import settings
from django.core.urlresolvers import is_valid_path
from django.db.utils import DatabaseError
from django.http import HttpResponseRedirect, HttpResponsePermanentRedirect
from django.http import HttpResponseForbidden
from django.shortcuts import render
from django.utils.cache import patch_vary_headers
from django.utils.encoding import iri_to_uri, smart_str, smart_unicode
import mobility
import tower
from kitsune.sumo.helpers import urlparams
from kitsune.sumo.urlresolvers import Prefixer, set_url_prefixer, split_path
from kitsune.sumo.views import handle403
class LocaleURLMiddleware(object):
"""
Based on zamboni.amo.middleware.
Tried to use localeurl but it choked on 'en-US' with capital letters.
1. Search for the locale.
2. Save it in the request.
3. Strip them from the URL.
"""
def process_request(self, request):
prefixer = Prefixer(request)
set_url_prefixer(prefixer)
full_path = prefixer.fix(prefixer.shortened_path)
if 'lang' in request.GET:
# Blank out the locale so that we can set a new one. Remove lang
# from the query params so we don't have an infinite loop.
prefixer.locale = ''
new_path = prefixer.fix(prefixer.shortened_path)
query = dict((smart_str(k), v) for
k, v in request.GET.iteritems() if k != 'lang')
# 'lang' is only used on the language selection page. If this is
# present it is safe to set language preference for the current
# user.
if request.user.is_anonymous():
cookie = settings.LANGUAGE_COOKIE_NAME
request.session[cookie] = request.GET['lang']
return HttpResponseRedirect(urlparams(new_path, **query))
if full_path != request.path:
query_string = request.META.get('QUERY_STRING', '')
full_path = urllib.quote(full_path.encode('utf-8'))
if query_string:
full_path = '%s?%s' % (full_path, query_string)
response = HttpResponseRedirect(full_path)
# Vary on Accept-Language if we changed the locale
old_locale = prefixer.locale
new_locale, _ = split_path(full_path)
if old_locale != new_locale:
response['Vary'] = 'Accept-Language'
return response
request.path_info = '/' + prefixer.shortened_path
request.LANGUAGE_CODE = prefixer.locale
tower.activate(prefixer.locale)
def process_response(self, request, response):
"""Unset the thread-local var we set during `process_request`."""
# This makes mistaken tests (that should use LocalizingClient but
# use Client instead) fail loudly and reliably. Otherwise, the set
# prefixer bleeds from one test to the next, making tests
# order-dependent and causing hard-to-track failures.
set_url_prefixer(None)
return response
def process_exception(self, request, exception):
set_url_prefixer(None)
class Forbidden403Middleware(object):
"""
Renders a 403.html page if response.status_code == 403.
"""
def process_response(self, request, response):
if isinstance(response, HttpResponseForbidden):
return handle403(request)
# If not 403, return response unmodified
return response
class NoCacheHttpsMiddleware(object):
"""
Sets no-cache headers when HTTPS META variable is set
and not equal to 'off'.
"""
def process_response(self, request, response):
if request.is_secure():
response['Expires'] = 'Thu, 19 Nov 1981 08:52:00 GMT'
response['Cache-Control'] = 'no-cache, must-revalidate'
response['Pragma'] = 'no-cache'
return response
class PlusToSpaceMiddleware(object):
"""Replace old-style + with %20 in URLs."""
def process_request(self, request):
p = re.compile(r'\+')
if p.search(request.path_info):
new = p.sub(' ', request.path_info)
if request.META['QUERY_STRING']:
new = u'%s?%s' % (new,
smart_unicode(request.META['QUERY_STRING']))
if hasattr(request, 'LANGUAGE_CODE'):
new = u'/%s%s' % (request.LANGUAGE_CODE, new)
return HttpResponsePermanentRedirect(new)
class ReadOnlyMiddleware(object):
def process_request(self, request):
if request.method == 'POST':
return render(request, 'sumo/read-only.html', status=503)
def process_exception(self, request, exception):
if isinstance(exception, DatabaseError):
return render(request, 'sumo/read-only.html', status=503)
class RemoveSlashMiddleware(object):
"""
Middleware that tries to remove a trailing slash if there was a 404.
If the response is a 404 because url resolution failed, we'll look for a
better url without a trailing slash.
"""
def process_response(self, request, response):
if (response.status_code == 404
and request.path_info.endswith('/')
and not is_valid_path(request.path_info)
and is_valid_path(request.path_info[:-1])):
# Use request.path because we munged app/locale in path_info.
newurl = request.path[:-1]
if request.GET:
with safe_query_string(request):
newurl += '?' + request.META['QUERY_STRING']
return HttpResponsePermanentRedirect(newurl)
return response
@contextlib.contextmanager
def safe_query_string(request):
"""
Turn the QUERY_STRING into a unicode- and ascii-safe string.
We need unicode so it can be combined with a reversed URL, but it has to be
ascii to go in a Location header. iri_to_uri seems like a good compromise.
"""
qs = request.META['QUERY_STRING']
try:
request.META['QUERY_STRING'] = iri_to_uri(qs)
yield
finally:
request.META['QUERY_STRING'] = qs
# Mobile user agents.
MOBILE_UAS = re.compile('android|fennec|mobile|iphone|opera (?:mini|mobi)')
# Tablet user agents. User agents matching tablets will not be considered
# to be mobile (for tablets, request.MOBILE = False).
TABLET_UAS = re.compile('tablet|ipad')
# This is a modified version of 'mobility.middleware.DetectMobileMiddleware'.
# We want to exclude tablets from being detected as MOBILE and there is
# no way to do that by just overriding the detection regex.
class DetectMobileMiddleware(object):
"""Looks at user agent and decides whether the device is mobile."""
def process_request(self, request):
ua = request.META.get('HTTP_USER_AGENT', '').lower()
mc = request.COOKIES.get(settings.MOBILE_COOKIE)
is_tablet = TABLET_UAS.search(ua)
is_mobile = not is_tablet and MOBILE_UAS.search(ua)
if (is_mobile and mc != 'off') or mc == 'on':
request.META['HTTP_X_MOBILE'] = '1'
def process_response(self, request, response):
patch_vary_headers(response, ['User-Agent'])
return response
class MobileSwitchMiddleware(object):
"""Looks for query string parameters to switch to the mobile site."""
def process_request(self, request):
mobile = request.GET.get('mobile')
if mobile == '0':
request.MOBILE = False
elif mobile == '1':
request.MOBILE = True
def process_response(self, request, response):
mobile = request.GET.get('mobile')
if mobile == '0':
response.set_cookie(mobility.middleware.COOKIE, 'off')
elif mobile == '1':
response.set_cookie(mobility.middleware.COOKIE, 'on')
return response
| bsd-3-clause | -4,682,056,368,905,304,000 | 34.96347 | 79 | 0.637887 | false |
mbooth101/spec2scl | spec2scl/bin.py | 2 | 4459 | """spec2scl entry point.
To be installed as /usr/bin/spec2scl.
"""
import argparse
import sys
from spec2scl.convertor import Convertor
from spec2scl.metapackage import Metapackage
from spec2scl.version import version
def get_parser():
    """Return an argument parser for the CLI command."""
parser = argparse.ArgumentParser(description='Convert RPM specfile to be SCL ready.')
parser.add_argument(
'specfiles',
help='Path(s) to the specfile(s).',
metavar='ARGUMENT',
nargs='*',
)
parser.add_argument(
'-V', '--version',
help='show spec2scl version',
action='version',
version=version,
)
parser.add_argument(
'-i',
        help='Convert in place (replace old specfiles with the newly generated ones).'
' Mandatory when multiple specfiles are to be converted.',
required=False,
action='store_true'
)
parser.add_argument(
'-r', '--no-meta-runtime-dep',
required=False,
help='Don\'t add the runtime dependency on the scl runtime package.',
action='store_true'
)
parser.add_argument(
'-b', '--no-meta-buildtime-dep',
required=False,
help='Don\'t add the buildtime dependency on the scl runtime package.',
action='store_true'
)
parser.add_argument(
'-k', '--skip-functions',
required=False,
default="",
help='Comma separated list of transformer functions to skip.',
)
grp = parser.add_mutually_exclusive_group(required=False)
grp.add_argument(
'-n', '--no-deps-convert',
required=False,
help='Don\'t convert dependency tags (mutually exclusive with -l).',
action='store_true',
)
grp.add_argument(
'-l', '--list-file',
required=False,
        help='List of the packages/provides that will be in the SCL '
             '(to convert Requires/BuildRequires properly). Lines in '
             'the file are in the form "pkg-name %%{?custom_prefix}", '
'where the prefix part is optional.',
metavar='SCL_CONTENTS_LIST'
)
meta_group = parser.add_argument_group(title='metapackage arguments')
meta_group.add_argument(
'--meta-specfile',
required=False,
help='Produce metapackage specfile based on the metapackage '
'name provided, see SCL docs for metapackage naming.',
metavar='METAPACKAGE_NAME'
)
meta_group.add_argument(
'-v', '--variables',
required=False,
default="",
        help='List of variables separated by commas (used only with'
             ' the --meta-specfile option).',
)
return parser
def main(args=None):
"""Main CLI entry point."""
parser = get_parser()
args = parser.parse_args(args)
# Produce a metapackage specfile.
if args.meta_specfile:
metapackage = Metapackage(
meta_name=args.meta_specfile,
variables=args.variables)
print(metapackage.create_specfile())
return
if len(args.specfiles) > 1 and not args.i:
        parser.error('You can only convert multiple specfiles using -i (in place) mode.')
if len(args.specfiles) == 0 and sys.stdin.isatty():
        parser.error('You must either specify specfile(s) or read from stdin.')
args.skip_functions = args.skip_functions.split(',')
convertor = Convertor(options=vars(args))
try:
convertor.handle_scl_deps()
except IOError as e:
print('Could not open file: {0}'.format(e))
sys.exit(1)
specs = []
# Convert a single specfile from stdin.
if len(args.specfiles) == 0 and not sys.stdin.isatty():
specs.append(sys.stdin.readlines())
# Convert specfiles passed as arguments.
for specfile in args.specfiles:
try:
with open(specfile) as f:
specs.append(f.readlines())
except IOError as e:
print('Could not open file: {0}'.format(e))
sys.exit(1)
for i, spec in enumerate(specs):
converted = convertor.convert(spec)
if not args.i or not args.specfiles:
print(converted)
else:
try:
f = open(args.specfiles[i], 'w')
f.write(str(converted))
except IOError as e:
print('Could not open file: {0}'.format(e))
else:
f.close()
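# Illustrative invocations (file and metapackage names are hypothetical), assuming the
# script is installed as /usr/bin/spec2scl as noted in the module docstring:
#   spec2scl foo.spec                      # print the converted specfile to stdout
#   spec2scl -i foo.spec bar.spec          # convert several specfiles in place
#   spec2scl --meta-specfile mycollection  # print a metapackage specfile instead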
| mit | -7,342,918,088,804,012,000 | 30.401408 | 89 | 0.592734 | false |
NeCTAR-RC/neutron | neutron/plugins/nec/extensions/packetfilter.py | 17 | 7755 | # Copyright 2012-2013 NEC Corporation.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from neutron.api import extensions
from neutron.api.v2 import attributes
from neutron.api.v2 import base
from neutron.common import constants
from neutron.common import exceptions
from neutron import manager
from neutron import quota
quota_packet_filter_opts = [
cfg.IntOpt('quota_packet_filter',
default=100,
help=_("Number of packet_filters allowed per tenant, "
"-1 for unlimited"))
]
cfg.CONF.register_opts(quota_packet_filter_opts, 'QUOTAS')
class PacketFilterNotFound(exceptions.NotFound):
message = _("PacketFilter %(id)s could not be found")
class PacketFilterIpVersionNonSupported(exceptions.BadRequest):
message = _("IP version %(version)s is not supported for %(field)s "
"(%(value)s is specified)")
class PacketFilterInvalidPriority(exceptions.BadRequest):
message = _("Packet Filter priority should be %(min)s-%(max)s (included)")
class PacketFilterUpdateNotSupported(exceptions.BadRequest):
message = _("%(field)s field cannot be updated")
class PacketFilterDuplicatedPriority(exceptions.BadRequest):
message = _("The backend does not support duplicated priority. "
"Priority %(priority)s is in use")
class PacketFilterEtherTypeProtocolMismatch(exceptions.Conflict):
message = _("Ether Type '%(eth_type)s' conflicts with protocol "
"'%(protocol)s'. Update or clear protocol before "
"changing ether type.")
def convert_to_int_dec_and_hex(data):
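    # int(data, 0) accepts plain decimal as well as prefixed (e.g. 0x1f) strings;
    # the second attempt handles values that are already numeric. Anything else
    # is rejected as invalid input.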
try:
return int(data, 0)
except (ValueError, TypeError):
pass
try:
return int(data)
except (ValueError, TypeError):
msg = _("'%s' is not a integer") % data
raise exceptions.InvalidInput(error_message=msg)
def convert_to_int_or_none(data):
if data is None:
return
return convert_to_int_dec_and_hex(data)
PROTO_NAME_ARP = 'arp'
SUPPORTED_PROTOCOLS = [constants.PROTO_NAME_ICMP,
constants.PROTO_NAME_TCP,
constants.PROTO_NAME_UDP,
PROTO_NAME_ARP]
ALLOW_ACTIONS = ['allow', 'accept']
DROP_ACTIONS = ['drop', 'deny']
SUPPORTED_ACTIONS = ALLOW_ACTIONS + DROP_ACTIONS
ALIAS = 'packet-filter'
RESOURCE = 'packet_filter'
COLLECTION = 'packet_filters'
PACKET_FILTER_ACTION_REGEX = '(?i)^(%s)$' % '|'.join(SUPPORTED_ACTIONS)
PACKET_FILTER_PROTOCOL_REGEX = ('(?i)^(%s|0x[0-9a-fA-F]+|[0-9]+|)$' %
'|'.join(SUPPORTED_PROTOCOLS))
PACKET_FILTER_ATTR_PARAMS = {
'id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True},
'name': {'allow_post': True, 'allow_put': True, 'default': '',
'validate': {'type:string': attributes.NAME_MAX_LEN},
'is_visible': True},
'tenant_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:string': attributes.TENANT_ID_MAX_LEN},
'required_by_policy': True,
'is_visible': True},
'network_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True},
'admin_state_up': {'allow_post': True, 'allow_put': True,
'default': True,
'convert_to': attributes.convert_to_boolean,
'is_visible': True},
'status': {'allow_post': False, 'allow_put': False,
'is_visible': True},
'action': {'allow_post': True, 'allow_put': True,
'validate': {'type:regex': PACKET_FILTER_ACTION_REGEX},
'is_visible': True},
'priority': {'allow_post': True, 'allow_put': True,
'convert_to': convert_to_int_dec_and_hex,
'is_visible': True},
'in_port': {'allow_post': True, 'allow_put': False,
'default': attributes.ATTR_NOT_SPECIFIED,
'validate': {'type:uuid': None},
'is_visible': True},
'src_mac': {'allow_post': True, 'allow_put': True,
'default': attributes.ATTR_NOT_SPECIFIED,
'validate': {'type:mac_address_or_none': None},
'is_visible': True},
'dst_mac': {'allow_post': True, 'allow_put': True,
'default': attributes.ATTR_NOT_SPECIFIED,
'validate': {'type:mac_address_or_none': None},
'is_visible': True},
'eth_type': {'allow_post': True, 'allow_put': True,
'default': attributes.ATTR_NOT_SPECIFIED,
'convert_to': convert_to_int_or_none,
'is_visible': True},
'src_cidr': {'allow_post': True, 'allow_put': True,
'default': attributes.ATTR_NOT_SPECIFIED,
'validate': {'type:subnet_or_none': None},
'is_visible': True},
'dst_cidr': {'allow_post': True, 'allow_put': True,
'default': attributes.ATTR_NOT_SPECIFIED,
'validate': {'type:subnet_or_none': None},
'is_visible': True},
'protocol': {'allow_post': True, 'allow_put': True,
'default': attributes.ATTR_NOT_SPECIFIED,
'validate': {'type:regex_or_none':
PACKET_FILTER_PROTOCOL_REGEX},
'is_visible': True},
'src_port': {'allow_post': True, 'allow_put': True,
'default': attributes.ATTR_NOT_SPECIFIED,
'convert_to': convert_to_int_or_none,
'is_visible': True},
'dst_port': {'allow_post': True, 'allow_put': True,
'default': attributes.ATTR_NOT_SPECIFIED,
'convert_to': convert_to_int_or_none,
'is_visible': True},
}
PACKET_FILTER_ATTR_MAP = {COLLECTION: PACKET_FILTER_ATTR_PARAMS}
class Packetfilter(extensions.ExtensionDescriptor):
@classmethod
def get_name(cls):
return ALIAS
@classmethod
def get_alias(cls):
return ALIAS
@classmethod
def get_description(cls):
return "PacketFilters on OFC"
@classmethod
def get_namespace(cls):
return "http://www.nec.co.jp/api/ext/packet_filter/v2.0"
@classmethod
def get_updated(cls):
return "2013-07-16T00:00:00+09:00"
@classmethod
def get_resources(cls):
qresource = quota.CountableResource(RESOURCE,
quota._count_resource,
'quota_%s' % RESOURCE)
quota.QUOTAS.register_resource(qresource)
resource = base.create_resource(COLLECTION, RESOURCE,
manager.NeutronManager.get_plugin(),
PACKET_FILTER_ATTR_PARAMS)
pf_ext = extensions.ResourceExtension(
COLLECTION, resource, attr_map=PACKET_FILTER_ATTR_PARAMS)
return [pf_ext]
def get_extended_resources(self, version):
if version == "2.0":
return PACKET_FILTER_ATTR_MAP
else:
return {}
| apache-2.0 | 1,969,084,743,495,518,000 | 37.20197 | 78 | 0.579626 | false |
tfroehlich82/EventGhost | eg/Classes/MessageReceiver.py | 2 | 4391 | # -*- coding: utf-8 -*-
#
# This file is part of EventGhost.
# Copyright © 2005-2020 EventGhost Project <http://www.eventghost.net/>
#
# EventGhost is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 2 of the License, or (at your option)
# any later version.
#
# EventGhost is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along
# with EventGhost. If not, see <http://www.gnu.org/licenses/>.
# Local imports
import eg
from eg.WinApi.Dynamic import (
byref, cast, CreateWindowEx, CW_USEDEFAULT, DefWindowProc, DestroyWindow,
GetModuleHandle, LPCTSTR, RegisterClass, UnregisterClass, WinError,
WM_SIZE, WM_USER, WNDCLASS, WNDPROC, WS_OVERLAPPEDWINDOW,
)
class MessageReceiver(eg.ThreadWorker):
"""
    An eg.ThreadWorker with an invisible window to receive win32 messages for
different purposes.
"""
def __init__(self, windowName):
self.windowName = windowName
self.messageProcs = {
WM_SIZE: [self.WmSizeHandler],
}
eg.ThreadWorker.__init__(self)
wndclass = WNDCLASS(
lpfnWndProc = WNDPROC(self.WindowProc),
hInstance = GetModuleHandle(None),
lpszMenuName = None,
lpszClassName = self.windowName + "MessageReceiver",
)
self.classAtom = RegisterClass(byref(wndclass))
if not self.classAtom:
raise WinError()
self.wndclass = wndclass
self.hwnd = None
self.nextWmUserMsg = WM_USER + 1000
self.wmUserHandlers = {}
self.freeWmUserMsgs = []
def AddHandler(self, mesg, handler):
if mesg not in self.messageProcs:
self.messageProcs[mesg] = [handler]
else:
self.messageProcs[mesg].append(handler)
def AddWmUserHandler(self, handler):
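        # Hand out a private message id in the WM_USER range, reusing ids freed
        # by RemoveWmUserHandler before allocating a new one from the counter.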
if len(self.freeWmUserMsgs):
msg = self.freeWmUserMsgs.pop()
else:
msg = self.nextWmUserMsg
self.nextWmUserMsg += 1
if self.nextWmUserMsg > 0x7FFF:
raise Exception("Running out of WM_USER messages")
self.wmUserHandlers[handler] = msg
self.AddHandler(msg, handler)
return msg
def Finish(self):
"""
Overrides eg.ThreadWorker.Finish to destroy the window instance.
"""
if not DestroyWindow(self.hwnd):
raise WinError()
self.hwnd = None
def RemoveHandler(self, mesg, handler):
self.messageProcs[mesg].remove(handler)
if len(self.messageProcs[mesg]) == 0:
del self.messageProcs[mesg]
def RemoveWmUserHandler(self, handler):
msg = self.wmUserHandlers[handler]
del self.wmUserHandlers[handler]
self.freeWmUserMsgs.append(msg)
self.RemoveHandler(msg, handler)
return msg
@eg.LogIt
def Setup(self):
"""
Overrides eg.ThreadWorker.Setup to create the window instance.
"""
self.hwnd = CreateWindowEx(
0,
self.wndclass.lpszClassName,
self.windowName,
WS_OVERLAPPEDWINDOW,
CW_USEDEFAULT,
CW_USEDEFAULT,
CW_USEDEFAULT,
CW_USEDEFAULT,
0,
0,
self.wndclass.hInstance,
None
)
if not self.hwnd:
raise WinError()
@eg.LogIt
def Stop(self):
self.messageProcs.clear()
eg.ThreadWorker.Stop(self, 5.0)
if not UnregisterClass(
cast(self.classAtom, LPCTSTR),
GetModuleHandle(None)
):
raise WinError()
def WindowProc(self, hwnd, mesg, wParam, lParam):
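        # Dispatch the message to the registered handlers; unhandled messages
        # fall through to DefWindowProc, and a handler returning 0 stops the
        # dispatch.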
if mesg not in self.messageProcs:
return DefWindowProc(hwnd, mesg, wParam, lParam)
for handler in self.messageProcs[mesg]:
res = handler(hwnd, mesg, wParam, lParam)
if res == 0:
return 0
return 1
def WmSizeHandler(self, hwnd, mesg, wParam, lParam):
#print "MessageReceiver sized"
return 0
| gpl-2.0 | 632,442,106,804,209,300 | 31.761194 | 77 | 0.614123 | false |
adelina-t/nova | nova/objects/service.py | 8 | 9638 | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from nova import availability_zones
from nova import db
from nova import exception
from nova import objects
from nova.objects import base
from nova.objects import fields
from nova import utils
LOG = logging.getLogger(__name__)
# TODO(berrange): Remove NovaObjectDictCompat
class Service(base.NovaPersistentObject, base.NovaObject,
base.NovaObjectDictCompat):
# Version 1.0: Initial version
# Version 1.1: Added compute_node nested object
# Version 1.2: String attributes updated to support unicode
# Version 1.3: ComputeNode version 1.5
# Version 1.4: Added use_slave to get_by_compute_host
# Version 1.5: ComputeNode version 1.6
# Version 1.6: ComputeNode version 1.7
# Version 1.7: ComputeNode version 1.8
# Version 1.8: ComputeNode version 1.9
# Version 1.9: ComputeNode version 1.10
# Version 1.10: Changes behaviour of loading compute_node
# Version 1.11: Added get_by_host_and_binary
# Version 1.12: ComputeNode version 1.11
VERSION = '1.12'
fields = {
'id': fields.IntegerField(read_only=True),
'host': fields.StringField(nullable=True),
'binary': fields.StringField(nullable=True),
'topic': fields.StringField(nullable=True),
'report_count': fields.IntegerField(),
'disabled': fields.BooleanField(),
'disabled_reason': fields.StringField(nullable=True),
'availability_zone': fields.StringField(nullable=True),
'compute_node': fields.ObjectField('ComputeNode'),
}
obj_relationships = {
'compute_node': [('1.1', '1.4'), ('1.3', '1.5'), ('1.5', '1.6'),
('1.7', '1.8'), ('1.8', '1.9'), ('1.9', '1.10'),
('1.12', '1.11')],
}
def obj_make_compatible(self, primitive, target_version):
_target_version = utils.convert_version_to_tuple(target_version)
if _target_version < (1, 10):
target_compute_version = self.obj_calculate_child_version(
target_version, 'compute_node')
# service.compute_node was not lazy-loaded, we need to provide it
# when called
self._do_compute_node(self._context, primitive,
target_compute_version)
super(Service, self).obj_make_compatible(primitive, target_version)
def _do_compute_node(self, context, primitive, target_version):
try:
# NOTE(sbauza): Some drivers (VMware, Ironic) can have multiple
# nodes for the same service, but for keeping same behaviour,
# returning only the first elem of the list
compute = objects.ComputeNodeList.get_all_by_host(
context, primitive['host'])[0]
except Exception:
return
primitive['compute_node'] = compute.obj_to_primitive(
target_version=target_version)
@staticmethod
def _from_db_object(context, service, db_service):
allow_missing = ('availability_zone',)
for key in service.fields:
if key in allow_missing and key not in db_service:
continue
if key == 'compute_node':
# NOTE(sbauza); We want to only lazy-load compute_node
continue
else:
service[key] = db_service[key]
service._context = context
service.obj_reset_changes()
return service
def obj_load_attr(self, attrname):
if not self._context:
raise exception.OrphanedObjectError(method='obj_load_attr',
objtype=self.obj_name())
LOG.debug("Lazy-loading `%(attr)s' on %(name)s id %(id)s",
{'attr': attrname,
'name': self.obj_name(),
'id': self.id,
})
if attrname != 'compute_node':
raise exception.ObjectActionError(
action='obj_load_attr',
reason='attribute %s not lazy-loadable' % attrname)
if self.binary == 'nova-compute':
# Only n-cpu services have attached compute_node(s)
compute_nodes = objects.ComputeNodeList.get_all_by_host(
self._context, self.host)
else:
# NOTE(sbauza); Previous behaviour was raising a ServiceNotFound,
# we keep it for backwards compatibility
raise exception.ServiceNotFound(service_id=self.id)
# NOTE(sbauza): Some drivers (VMware, Ironic) can have multiple nodes
# for the same service, but for keeping same behaviour, returning only
# the first elem of the list
self.compute_node = compute_nodes[0]
@base.remotable_classmethod
def get_by_id(cls, context, service_id):
db_service = db.service_get(context, service_id)
return cls._from_db_object(context, cls(), db_service)
@base.remotable_classmethod
def get_by_host_and_topic(cls, context, host, topic):
db_service = db.service_get_by_host_and_topic(context, host, topic)
return cls._from_db_object(context, cls(), db_service)
@base.remotable_classmethod
def get_by_host_and_binary(cls, context, host, binary):
try:
db_service = db.service_get_by_host_and_binary(context,
host, binary)
except exception.HostBinaryNotFound:
return
return cls._from_db_object(context, cls(), db_service)
@base.remotable_classmethod
def get_by_compute_host(cls, context, host, use_slave=False):
db_service = db.service_get_by_compute_host(context, host)
return cls._from_db_object(context, cls(), db_service)
# NOTE(ndipanov): This is deprecated and should be removed on the next
# major version bump
@base.remotable_classmethod
def get_by_args(cls, context, host, binary):
db_service = db.service_get_by_host_and_binary(context, host, binary)
return cls._from_db_object(context, cls(), db_service)
@base.remotable
def create(self):
if self.obj_attr_is_set('id'):
raise exception.ObjectActionError(action='create',
reason='already created')
updates = self.obj_get_changes()
db_service = db.service_create(self._context, updates)
self._from_db_object(self._context, self, db_service)
@base.remotable
def save(self):
updates = self.obj_get_changes()
updates.pop('id', None)
db_service = db.service_update(self._context, self.id, updates)
self._from_db_object(self._context, self, db_service)
@base.remotable
def destroy(self):
db.service_destroy(self._context, self.id)
class ServiceList(base.ObjectListBase, base.NovaObject):
# Version 1.0: Initial version
# Service <= version 1.2
# Version 1.1 Service version 1.3
# Version 1.2: Service version 1.4
# Version 1.3: Service version 1.5
# Version 1.4: Service version 1.6
# Version 1.5: Service version 1.7
# Version 1.6: Service version 1.8
# Version 1.7: Service version 1.9
# Version 1.8: Service version 1.10
# Version 1.9: Added get_by_binary() and Service version 1.11
# Version 1.10: Service version 1.12
VERSION = '1.10'
fields = {
'objects': fields.ListOfObjectsField('Service'),
}
child_versions = {
'1.0': '1.2',
# NOTE(danms): Service was at 1.2 before we added this
'1.1': '1.3',
'1.2': '1.4',
'1.3': '1.5',
'1.4': '1.6',
'1.5': '1.7',
'1.6': '1.8',
'1.7': '1.9',
'1.8': '1.10',
'1.9': '1.11',
'1.10': '1.12',
}
@base.remotable_classmethod
def get_by_topic(cls, context, topic):
db_services = db.service_get_all_by_topic(context, topic)
return base.obj_make_list(context, cls(context), objects.Service,
db_services)
@base.remotable_classmethod
def get_by_binary(cls, context, binary):
db_services = db.service_get_all_by_binary(context, binary)
return base.obj_make_list(context, cls(context), objects.Service,
db_services)
@base.remotable_classmethod
def get_by_host(cls, context, host):
db_services = db.service_get_all_by_host(context, host)
return base.obj_make_list(context, cls(context), objects.Service,
db_services)
@base.remotable_classmethod
def get_all(cls, context, disabled=None, set_zones=False):
db_services = db.service_get_all(context, disabled=disabled)
if set_zones:
db_services = availability_zones.set_availability_zones(
context, db_services)
return base.obj_make_list(context, cls(context), objects.Service,
db_services)
| apache-2.0 | -6,605,676,994,764,515,000 | 39.158333 | 78 | 0.602511 | false |
lorensen/VTKExamples | src/Python/VisualizationAlgorithms/ExponentialCosine.py | 1 | 2270 | #!/usr/bin/env python
import math
import vtk
def main():
colors = vtk.vtkNamedColors()
# Create the RenderWindow, Renderer and Interactor.
#
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# create plane to warp
plane = vtk.vtkPlaneSource()
plane.SetResolution(300, 300)
transform = vtk.vtkTransform()
transform.Scale(10.0, 10.0, 1.0)
transF = vtk.vtkTransformPolyDataFilter()
transF.SetInputConnection(plane.GetOutputPort())
transF.SetTransform(transform)
transF.Update()
    # Compute the exponentially damped cosine exp(-r)*cos(10*r) and its
    # derivative (the polydata below keeps the name 'bessel'). This portion
    # could be encapsulated into a source or filter object.
#
inputPd = transF.GetOutput()
numPts = inputPd.GetNumberOfPoints()
newPts = vtk.vtkPoints()
newPts.SetNumberOfPoints(numPts)
derivs = vtk.vtkDoubleArray()
derivs.SetNumberOfTuples(numPts)
bessel = vtk.vtkPolyData()
bessel.CopyStructure(inputPd)
bessel.SetPoints(newPts)
bessel.GetPointData().SetScalars(derivs)
x = [0.0] * 3
for i in range(0, numPts):
inputPd.GetPoint(i, x)
r = math.sqrt(float(x[0] * x[0]) + x[1] * x[1])
x[2] = math.exp(-r) * math.cos(10.0 * r)
newPts.SetPoint(i, x)
deriv = -math.exp(-r) * (math.cos(10.0 * r) + 10.0 * math.sin(10.0 * r))
derivs.SetValue(i, deriv)
# Warp the plane.
warp = vtk.vtkWarpScalar()
warp.SetInputData(bessel)
warp.XYPlaneOn()
warp.SetScaleFactor(0.5)
# Mapper and actor.
mapper = vtk.vtkDataSetMapper()
mapper.SetInputConnection(warp.GetOutputPort())
tmp = bessel.GetScalarRange()
mapper.SetScalarRange(tmp[0], tmp[1])
carpet = vtk.vtkActor()
carpet.SetMapper(mapper)
# Assign our actor to the renderer.
ren.AddActor(carpet)
ren.SetBackground(colors.GetColor3d("Beige"))
renWin.SetSize(640, 480)
# draw the resulting scene
ren.ResetCamera()
ren.GetActiveCamera().Zoom(1.4)
ren.GetActiveCamera().Elevation(-55)
ren.GetActiveCamera().Azimuth(25)
ren.ResetCameraClippingRange()
renWin.Render()
iren.Start()
if __name__ == '__main__':
main()
| apache-2.0 | 8,300,699,698,438,049,000 | 24.222222 | 80 | 0.652863 | false |
tima/ansible | lib/ansible/executor/play_iterator.py | 20 | 26397 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import fnmatch
from ansible import constants as C
from ansible.module_utils.six import iteritems
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.playbook.block import Block
from ansible.playbook.task import Task
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
__all__ = ['PlayIterator']
class HostState:
def __init__(self, blocks):
self._blocks = blocks[:]
self.cur_block = 0
self.cur_regular_task = 0
self.cur_rescue_task = 0
self.cur_always_task = 0
self.cur_dep_chain = None
self.run_state = PlayIterator.ITERATING_SETUP
self.fail_state = PlayIterator.FAILED_NONE
self.pending_setup = False
self.tasks_child_state = None
self.rescue_child_state = None
self.always_child_state = None
self.did_rescue = False
self.did_start_at_task = False
def __repr__(self):
return "HostState(%r)" % self._blocks
def __str__(self):
def _run_state_to_string(n):
states = ["ITERATING_SETUP", "ITERATING_TASKS", "ITERATING_RESCUE", "ITERATING_ALWAYS", "ITERATING_COMPLETE"]
try:
return states[n]
except IndexError:
return "UNKNOWN STATE"
def _failed_state_to_string(n):
states = {1: "FAILED_SETUP", 2: "FAILED_TASKS", 4: "FAILED_RESCUE", 8: "FAILED_ALWAYS"}
if n == 0:
return "FAILED_NONE"
else:
ret = []
for i in (1, 2, 4, 8):
if n & i:
ret.append(states[i])
return "|".join(ret)
return ("HOST STATE: block=%d, task=%d, rescue=%d, always=%d, run_state=%s, fail_state=%s, pending_setup=%s, tasks child state? (%s), "
"rescue child state? (%s), always child state? (%s), did rescue? %s, did start at task? %s" % (
self.cur_block,
self.cur_regular_task,
self.cur_rescue_task,
self.cur_always_task,
_run_state_to_string(self.run_state),
_failed_state_to_string(self.fail_state),
self.pending_setup,
self.tasks_child_state,
self.rescue_child_state,
self.always_child_state,
self.did_rescue,
self.did_start_at_task,
))
def __eq__(self, other):
if not isinstance(other, HostState):
return False
for attr in ('_blocks', 'cur_block', 'cur_regular_task', 'cur_rescue_task', 'cur_always_task',
'run_state', 'fail_state', 'pending_setup', 'cur_dep_chain',
'tasks_child_state', 'rescue_child_state', 'always_child_state'):
if getattr(self, attr) != getattr(other, attr):
return False
return True
def get_current_block(self):
return self._blocks[self.cur_block]
def copy(self):
new_state = HostState(self._blocks)
new_state.cur_block = self.cur_block
new_state.cur_regular_task = self.cur_regular_task
new_state.cur_rescue_task = self.cur_rescue_task
new_state.cur_always_task = self.cur_always_task
new_state.run_state = self.run_state
new_state.fail_state = self.fail_state
new_state.pending_setup = self.pending_setup
new_state.did_rescue = self.did_rescue
new_state.did_start_at_task = self.did_start_at_task
if self.cur_dep_chain is not None:
new_state.cur_dep_chain = self.cur_dep_chain[:]
if self.tasks_child_state is not None:
new_state.tasks_child_state = self.tasks_child_state.copy()
if self.rescue_child_state is not None:
new_state.rescue_child_state = self.rescue_child_state.copy()
if self.always_child_state is not None:
new_state.always_child_state = self.always_child_state.copy()
return new_state
class PlayIterator:
# the primary running states for the play iteration
ITERATING_SETUP = 0
ITERATING_TASKS = 1
ITERATING_RESCUE = 2
ITERATING_ALWAYS = 3
ITERATING_COMPLETE = 4
# the failure states for the play iteration, which are powers
# of 2 as they may be or'ed together in certain circumstances
FAILED_NONE = 0
FAILED_SETUP = 1
FAILED_TASKS = 2
FAILED_RESCUE = 4
FAILED_ALWAYS = 8
def __init__(self, inventory, play, play_context, variable_manager, all_vars, start_at_done=False):
self._play = play
self._blocks = []
self._variable_manager = variable_manager
# Default options to gather
gather_subset = play_context.gather_subset
gather_timeout = play_context.gather_timeout
fact_path = play_context.fact_path
# Retrieve subset to gather
if self._play.gather_subset is not None:
gather_subset = self._play.gather_subset
# Retrieve timeout for gather
if self._play.gather_timeout is not None:
gather_timeout = self._play.gather_timeout
# Retrieve fact_path
if self._play.fact_path is not None:
fact_path = self._play.fact_path
setup_block = Block(play=self._play)
setup_task = Task(block=setup_block)
setup_task.action = 'setup'
setup_task.name = 'Gathering Facts'
setup_task.tags = ['always']
setup_task.args = {
'gather_subset': gather_subset,
}
if gather_timeout:
setup_task.args['gather_timeout'] = gather_timeout
if fact_path:
setup_task.args['fact_path'] = fact_path
setup_task.set_loader(self._play._loader)
# short circuit fact gathering if the entire playbook is conditional
if self._play._included_conditional is not None:
setup_task.when = self._play._included_conditional[:]
setup_block.block = [setup_task]
setup_block = setup_block.filter_tagged_tasks(play_context, all_vars)
self._blocks.append(setup_block)
self.cache_block_tasks(setup_block)
for block in self._play.compile():
new_block = block.filter_tagged_tasks(play_context, all_vars)
if new_block.has_tasks():
self.cache_block_tasks(new_block)
self._blocks.append(new_block)
for handler_block in self._play.handlers:
self.cache_block_tasks(handler_block)
self._host_states = {}
start_at_matched = False
batch = inventory.get_hosts(self._play.hosts)
self.batch_size = len(batch)
for host in batch:
self._host_states[host.name] = HostState(blocks=self._blocks)
# if we're looking to start at a specific task, iterate through
# the tasks for this host until we find the specified task
if play_context.start_at_task is not None and not start_at_done:
while True:
(s, task) = self.get_next_task_for_host(host, peek=True)
if s.run_state == self.ITERATING_COMPLETE:
break
if task.name == play_context.start_at_task or fnmatch.fnmatch(task.name, play_context.start_at_task) or \
task.get_name() == play_context.start_at_task or fnmatch.fnmatch(task.get_name(), play_context.start_at_task):
start_at_matched = True
break
else:
self.get_next_task_for_host(host)
# finally, reset the host's state to ITERATING_SETUP
if start_at_matched:
self._host_states[host.name].did_start_at_task = True
self._host_states[host.name].run_state = self.ITERATING_SETUP
if start_at_matched:
# we have our match, so clear the start_at_task field on the
# play context to flag that we've started at a task (and future
# plays won't try to advance)
play_context.start_at_task = None
def get_host_state(self, host):
# Since we're using the PlayIterator to carry forward failed hosts,
# in the event that a previous host was not in the current inventory
# we create a stub state for it now
if host.name not in self._host_states:
self._host_states[host.name] = HostState(blocks=[])
return self._host_states[host.name].copy()
def cache_block_tasks(self, block):
# now a noop, we've changed the way we do caching and finding of
# original task entries, but just in case any 3rd party strategies
# are using this we're leaving it here for now
return
def get_next_task_for_host(self, host, peek=False):
display.debug("getting the next task for host %s" % host.name)
s = self.get_host_state(host)
task = None
if s.run_state == self.ITERATING_COMPLETE:
display.debug("host %s is done iterating, returning" % host.name)
return (s, None)
(s, task) = self._get_next_task_from_state(s, host=host, peek=peek)
if not peek:
self._host_states[host.name] = s
display.debug("done getting next task for host %s" % host.name)
display.debug(" ^ task is: %s" % task)
display.debug(" ^ state is: %s" % s)
return (s, task)
def _get_next_task_from_state(self, state, host, peek, in_child=False):
task = None
# try and find the next task, given the current state.
while True:
# try to get the current block from the list of blocks, and
# if we run past the end of the list we know we're done with
# this block
try:
block = state._blocks[state.cur_block]
except IndexError:
state.run_state = self.ITERATING_COMPLETE
return (state, None)
if state.run_state == self.ITERATING_SETUP:
# First, we check to see if we were pending setup. If not, this is
# the first trip through ITERATING_SETUP, so we set the pending_setup
# flag and try to determine if we do in fact want to gather facts for
# the specified host.
if not state.pending_setup:
state.pending_setup = True
# Gather facts if the default is 'smart' and we have not yet
# done it for this host; or if 'explicit' and the play sets
# gather_facts to True; or if 'implicit' and the play does
# NOT explicitly set gather_facts to False.
gathering = C.DEFAULT_GATHERING
implied = self._play.gather_facts is None or boolean(self._play.gather_facts, strict=False)
if (gathering == 'implicit' and implied) or \
(gathering == 'explicit' and boolean(self._play.gather_facts, strict=False)) or \
(gathering == 'smart' and implied and not (self._variable_manager._fact_cache.get(host.name, {}).get('module_setup', False))):
# The setup block is always self._blocks[0], as we inject it
# during the play compilation in __init__ above.
setup_block = self._blocks[0]
if setup_block.has_tasks() and len(setup_block.block) > 0:
task = setup_block.block[0]
else:
# This is the second trip through ITERATING_SETUP, so we clear
# the flag and move onto the next block in the list while setting
# the run state to ITERATING_TASKS
state.pending_setup = False
state.run_state = self.ITERATING_TASKS
if not state.did_start_at_task:
state.cur_block += 1
state.cur_regular_task = 0
state.cur_rescue_task = 0
state.cur_always_task = 0
state.child_state = None
elif state.run_state == self.ITERATING_TASKS:
# clear the pending setup flag, since we're past that and it didn't fail
if state.pending_setup:
state.pending_setup = False
# First, we check for a child task state that is not failed, and if we
# have one recurse into it for the next task. If we're done with the child
# state, we clear it and drop back to getting the next task from the list.
if state.tasks_child_state:
(state.tasks_child_state, task) = self._get_next_task_from_state(state.tasks_child_state, host=host, peek=peek, in_child=True)
if self._check_failed_state(state.tasks_child_state):
# failed child state, so clear it and move into the rescue portion
state.tasks_child_state = None
self._set_failed_state(state)
else:
# get the next task recursively
if task is None or state.tasks_child_state.run_state == self.ITERATING_COMPLETE:
# we're done with the child state, so clear it and continue
# back to the top of the loop to get the next task
state.tasks_child_state = None
continue
else:
# First here, we check to see if we've failed anywhere down the chain
# of states we have, and if so we move onto the rescue portion. Otherwise,
# we check to see if we've moved past the end of the list of tasks. If so,
# we move into the always portion of the block, otherwise we get the next
# task from the list.
if self._check_failed_state(state):
state.run_state = self.ITERATING_RESCUE
elif state.cur_regular_task >= len(block.block):
state.run_state = self.ITERATING_ALWAYS
else:
task = block.block[state.cur_regular_task]
# if the current task is actually a child block, create a child
# state for us to recurse into on the next pass
if isinstance(task, Block) or state.tasks_child_state is not None:
state.tasks_child_state = HostState(blocks=[task])
state.tasks_child_state.run_state = self.ITERATING_TASKS
# since we've created the child state, clear the task
# so we can pick up the child state on the next pass
task = None
state.cur_regular_task += 1
elif state.run_state == self.ITERATING_RESCUE:
# The process here is identical to ITERATING_TASKS, except instead
# we move into the always portion of the block.
if host.name in self._play._removed_hosts:
self._play._removed_hosts.remove(host.name)
if state.rescue_child_state:
(state.rescue_child_state, task) = self._get_next_task_from_state(state.rescue_child_state, host=host, peek=peek, in_child=True)
if self._check_failed_state(state.rescue_child_state):
state.rescue_child_state = None
self._set_failed_state(state)
else:
if task is None or state.rescue_child_state.run_state == self.ITERATING_COMPLETE:
state.rescue_child_state = None
continue
else:
if state.fail_state & self.FAILED_RESCUE == self.FAILED_RESCUE:
state.run_state = self.ITERATING_ALWAYS
elif state.cur_rescue_task >= len(block.rescue):
if len(block.rescue) > 0:
state.fail_state = self.FAILED_NONE
state.run_state = self.ITERATING_ALWAYS
state.did_rescue = True
else:
task = block.rescue[state.cur_rescue_task]
if isinstance(task, Block) or state.rescue_child_state is not None:
state.rescue_child_state = HostState(blocks=[task])
state.rescue_child_state.run_state = self.ITERATING_TASKS
task = None
state.cur_rescue_task += 1
elif state.run_state == self.ITERATING_ALWAYS:
# And again, the process here is identical to ITERATING_TASKS, except
# instead we either move onto the next block in the list, or we set the
# run state to ITERATING_COMPLETE in the event of any errors, or when we
# have hit the end of the list of blocks.
if state.always_child_state:
(state.always_child_state, task) = self._get_next_task_from_state(state.always_child_state, host=host, peek=peek, in_child=True)
if self._check_failed_state(state.always_child_state):
state.always_child_state = None
self._set_failed_state(state)
else:
if task is None or state.always_child_state.run_state == self.ITERATING_COMPLETE:
state.always_child_state = None
continue
else:
if state.cur_always_task >= len(block.always):
if state.fail_state != self.FAILED_NONE:
state.run_state = self.ITERATING_COMPLETE
else:
state.cur_block += 1
state.cur_regular_task = 0
state.cur_rescue_task = 0
state.cur_always_task = 0
state.run_state = self.ITERATING_TASKS
state.tasks_child_state = None
state.rescue_child_state = None
state.always_child_state = None
state.did_rescue = False
# we're advancing blocks, so if this was an end-of-role block we
# mark the current role complete
if block._eor and host.name in block._role._had_task_run and not in_child and not peek:
block._role._completed[host.name] = True
else:
task = block.always[state.cur_always_task]
if isinstance(task, Block) or state.always_child_state is not None:
state.always_child_state = HostState(blocks=[task])
state.always_child_state.run_state = self.ITERATING_TASKS
task = None
state.cur_always_task += 1
elif state.run_state == self.ITERATING_COMPLETE:
return (state, None)
# if something above set the task, break out of the loop now
if task:
break
return (state, task)
def _set_failed_state(self, state):
if state.run_state == self.ITERATING_SETUP:
state.fail_state |= self.FAILED_SETUP
state.run_state = self.ITERATING_COMPLETE
elif state.run_state == self.ITERATING_TASKS:
if state.tasks_child_state is not None:
state.tasks_child_state = self._set_failed_state(state.tasks_child_state)
else:
state.fail_state |= self.FAILED_TASKS
if state._blocks[state.cur_block].rescue:
state.run_state = self.ITERATING_RESCUE
elif state._blocks[state.cur_block].always:
state.run_state = self.ITERATING_ALWAYS
else:
state.run_state = self.ITERATING_COMPLETE
elif state.run_state == self.ITERATING_RESCUE:
if state.rescue_child_state is not None:
state.rescue_child_state = self._set_failed_state(state.rescue_child_state)
else:
state.fail_state |= self.FAILED_RESCUE
if state._blocks[state.cur_block].always:
state.run_state = self.ITERATING_ALWAYS
else:
state.run_state = self.ITERATING_COMPLETE
elif state.run_state == self.ITERATING_ALWAYS:
if state.always_child_state is not None:
state.always_child_state = self._set_failed_state(state.always_child_state)
else:
state.fail_state |= self.FAILED_ALWAYS
state.run_state = self.ITERATING_COMPLETE
return state
def mark_host_failed(self, host):
s = self.get_host_state(host)
display.debug("marking host %s failed, current state: %s" % (host, s))
s = self._set_failed_state(s)
display.debug("^ failed state is now: %s" % s)
self._host_states[host.name] = s
self._play._removed_hosts.append(host.name)
def get_failed_hosts(self):
return dict((host, True) for (host, state) in iteritems(self._host_states) if self._check_failed_state(state))
def _check_failed_state(self, state):
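        # A failure is only final if no rescue/always section can still handle
        # it (and no rescue already ran); nested child states are checked
        # recursively.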
if state is None:
return False
elif state.run_state == self.ITERATING_RESCUE and self._check_failed_state(state.rescue_child_state):
return True
elif state.run_state == self.ITERATING_ALWAYS and self._check_failed_state(state.always_child_state):
return True
elif state.fail_state != self.FAILED_NONE:
if state.run_state == self.ITERATING_RESCUE and state.fail_state & self.FAILED_RESCUE == 0:
return False
elif state.run_state == self.ITERATING_ALWAYS and state.fail_state & self.FAILED_ALWAYS == 0:
return False
else:
return not state.did_rescue
elif state.run_state == self.ITERATING_TASKS and self._check_failed_state(state.tasks_child_state):
cur_block = self._blocks[state.cur_block]
if len(cur_block.rescue) > 0 and state.fail_state & self.FAILED_RESCUE == 0:
return False
else:
return True
return False
def is_failed(self, host):
s = self.get_host_state(host)
return self._check_failed_state(s)
def get_original_task(self, host, task):
# now a noop because we've changed the way we do caching
return (None, None)
def _insert_tasks_into_state(self, state, task_list):
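        # Splice dynamically added tasks into the currently running section
        # (tasks/rescue/always) at the host's current position, recursing into
        # any active child state first.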
# if we've failed at all, or if the task list is empty, just return the current state
if state.fail_state != self.FAILED_NONE and state.run_state not in (self.ITERATING_RESCUE, self.ITERATING_ALWAYS) or not task_list:
return state
if state.run_state == self.ITERATING_TASKS:
if state.tasks_child_state:
state.tasks_child_state = self._insert_tasks_into_state(state.tasks_child_state, task_list)
else:
target_block = state._blocks[state.cur_block].copy(exclude_parent=True)
before = target_block.block[:state.cur_regular_task]
after = target_block.block[state.cur_regular_task:]
target_block.block = before + task_list + after
state._blocks[state.cur_block] = target_block
elif state.run_state == self.ITERATING_RESCUE:
if state.rescue_child_state:
state.rescue_child_state = self._insert_tasks_into_state(state.rescue_child_state, task_list)
else:
target_block = state._blocks[state.cur_block].copy(exclude_parent=True)
before = target_block.rescue[:state.cur_rescue_task]
after = target_block.rescue[state.cur_rescue_task:]
target_block.rescue = before + task_list + after
state._blocks[state.cur_block] = target_block
elif state.run_state == self.ITERATING_ALWAYS:
if state.always_child_state:
state.always_child_state = self._insert_tasks_into_state(state.always_child_state, task_list)
else:
target_block = state._blocks[state.cur_block].copy(exclude_parent=True)
before = target_block.always[:state.cur_always_task]
after = target_block.always[state.cur_always_task:]
target_block.always = before + task_list + after
state._blocks[state.cur_block] = target_block
return state
def add_tasks(self, host, task_list):
self._host_states[host.name] = self._insert_tasks_into_state(self.get_host_state(host), task_list)
| gpl-3.0 | 8,333,173,544,927,719,000 | 46.734177 | 149 | 0.561011 | false |
tchernomax/ansible | lib/ansible/modules/network/aci/aci_interface_policy_leaf_profile.py | 10 | 6845 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Bruno Calogero <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: aci_interface_policy_leaf_profile
short_description: Manage fabric interface policy leaf profiles (infra:AccPortP)
description:
- Manage fabric interface policy leaf profiles on Cisco ACI fabrics.
notes:
- More information about the internal APIC class B(infra:AccPortP) from
L(the APIC Management Information Model reference,https://developer.cisco.com/docs/apic-mim-ref/).
author:
- Bruno Calogero (@brunocalogero)
version_added: '2.5'
options:
leaf_interface_profile:
description:
- The name of the Fabric access policy leaf interface profile.
required: yes
aliases: [ name, leaf_interface_profile_name ]
description:
description:
- Description for the Fabric access policy leaf interface profile.
aliases: [ descr ]
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
choices: [ absent, present, query ]
default: present
extends_documentation_fragment: aci
'''
EXAMPLES = r'''
- name: Add a new leaf_interface_profile
aci_interface_policy_leaf_profile:
host: apic
username: admin
password: SomeSecretPassword
leaf_interface_profile: leafintprfname
description: leafintprfname description
state: present
delegate_to: localhost
- name: Remove a leaf_interface_profile
aci_interface_policy_leaf_profile:
host: apic
username: admin
password: SomeSecretPassword
leaf_interface_profile: leafintprfname
state: absent
delegate_to: localhost
- name: Remove all leaf_interface_profiles
aci_interface_policy_leaf_profile:
host: apic
username: admin
password: SomeSecretPassword
state: absent
delegate_to: localhost
- name: Query a leaf_interface_profile
aci_interface_policy_leaf_profile:
host: apic
username: admin
password: SomeSecretPassword
leaf_interface_profile: leafintprfname
state: query
delegate_to: localhost
register: query_result
'''
RETURN = r'''
current:
description: The existing configuration from the APIC after the module has finished
returned: success
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
error:
description: The error information as returned from the APIC
returned: failure
type: dict
sample:
{
"code": "122",
"text": "unknown managed object class foo"
}
raw:
description: The raw output returned by the APIC REST API (xml or json)
returned: parse error
type: string
sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>'
sent:
description: The actual/minimal configuration pushed to the APIC
returned: info
type: list
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment"
}
}
}
previous:
description: The original configuration from the APIC before the module has started
returned: info
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
proposed:
description: The assembled configuration from the user-provided parameters
returned: info
type: dict
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"name": "production"
}
}
}
filter_string:
description: The filter string used for the request
returned: failure or debug
type: string
sample: ?rsp-prop-include=config-only
method:
description: The HTTP method used for the request to the APIC
returned: failure or debug
type: string
sample: POST
response:
description: The HTTP response from the APIC
returned: failure or debug
type: string
sample: OK (30 bytes)
status:
description: The HTTP status from the APIC
returned: failure or debug
type: int
sample: 200
url:
description: The HTTP url used for the request to the APIC
returned: failure or debug
type: string
sample: https://10.11.12.13/api/mo/uni/tn-production.json
'''
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
from ansible.module_utils.basic import AnsibleModule
def main():
argument_spec = aci_argument_spec()
argument_spec.update(
leaf_interface_profile=dict(type='str', aliases=['name', 'leaf_interface_profile_name']), # Not required for querying all objects
description=dict(type='str', aliases=['descr']),
state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=[
['state', 'absent', ['leaf_interface_profile']],
['state', 'present', ['leaf_interface_profile']],
],
)
leaf_interface_profile = module.params['leaf_interface_profile']
description = module.params['description']
state = module.params['state']
aci = ACIModule(module)
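    # The leaf interface profile is class infraAccPortP, addressed by the
    # rn infra/accportprof-<profile name>.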
aci.construct_url(
root_class=dict(
aci_class='infraAccPortP',
aci_rn='infra/accportprof-{0}'.format(leaf_interface_profile),
module_object=leaf_interface_profile,
target_filter={'name': leaf_interface_profile},
),
)
aci.get_existing()
if state == 'present':
aci.payload(
aci_class='infraAccPortP',
class_config=dict(
name=leaf_interface_profile,
descr=description,
),
)
aci.get_diff(aci_class='infraAccPortP')
aci.post_config()
elif state == 'absent':
aci.delete_config()
aci.exit_json()
if __name__ == "__main__":
main()
| gpl-3.0 | 4,175,242,929,237,327,000 | 26.938776 | 141 | 0.621329 | false |
jvanbrug/netflix | tests/test_py_c_interface.py | 1 | 1455 | import ctypes
import numpy as np
import os
from utils import data_paths
def test_py_c_access_test_can_read_numpy_array_in_memory():
test_array = np.array(range(0, 100), dtype=np.int32)
library_file_name = 'test_py_c_interface.so'
library_file_path = os.path.join(data_paths.LIBRARY_DIR_PATH,
library_file_name)
test_lib = ctypes.cdll.LoadLibrary(library_file_path)
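    # Pass the array's raw buffer address and element count so the C side can
    # read the data directly.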
returned_value = test_lib.py_c_access_test(
ctypes.c_void_p(test_array.ctypes.data),
ctypes.c_int32(100)
)
read_error_message = 'Py/C cannot read the numpy array in memory.'
assert returned_value == 0, read_error_message
def test_py_c_can_write_to_numpy_array_in_memory():
test_array = np.array(range(0, 100), dtype=np.int32)
expected_array = np.copy(test_array)
expected_array[5:50] = np.ones(shape=(45,), dtype=np.int32) * 3
library_file_name = 'test_py_c_interface.so'
library_file_path = os.path.join(data_paths.LIBRARY_DIR_PATH,
library_file_name)
test_lib = ctypes.cdll.LoadLibrary(library_file_path)
returned_value = test_lib.py_c_write_test(test_array[5:50].ctypes.data,
ctypes.c_int32(45))
write_error_message = 'Py/C cannot write to the numpy array in memory.'
assert returned_value == 0, write_error_message
np.testing.assert_array_equal(test_array, expected_array)
| mit | 4,447,658,009,486,906,400 | 41.794118 | 75 | 0.642612 | false |
mmalorni/server-tools | __unported__/server_env_base_external_referentials/__openerp__.py | 17 | 1921 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Guewen Baconnier
# Copyright 2011-2012 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name": "Server environment for base_external_referential",
"version": "1.0",
"depends": ["base", 'server_environment', 'base_external_referentials'],
"author": "Camptocamp",
'license': 'AGPL-3',
"description": """This module is based on the server_environment module to use files for configuration.
Thus we can have a different file for each environment (dev, test, staging, prod).
This module define the config variables for the base_external_referential module.
In the configuration file, you can configure the url, login and password of the referentials
Exemple of the section to put in the configuration file :
[external_referential.name_of_my_external_referential]
location = http://localhost/magento/
apiusername = my_api_login
apipass = my_api_password
""",
"website": "http://www.camptocamp.com",
"category": "Tools",
"init_xml": [],
"demo_xml": [],
"update_xml": [],
"installable": False,
"active": False,
}
| agpl-3.0 | 4,934,513,819,826,931,000 | 39.87234 | 107 | 0.646018 | false |
nileshk/url-shortener | settings.py | 2 | 3330 | # Django settings for urlweb project.
import os, logging
#from django.conf.global_settings import TEMPLATE_CONTEXT_PROCESSORS
logging.basicConfig(
level = logging.DEBUG,
format = '%(asctime)s %(levelname)s %(message)s',
)
logging.debug("Reading settings...")
PROJECT_PATH = os.path.abspath(os.path.dirname(__file__))
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASE_ENGINE = 'sqlite3' # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
#DATABASE_NAME = '' # Or path to database file if using sqlite3.
DATABASE_NAME = os.path.join(PROJECT_PATH, 'database.sqlite')
DATABASE_USER = '' # Not used with sqlite3.
DATABASE_PASSWORD = '' # Not used with sqlite3.
DATABASE_HOST = '' # Set to empty string for localhost. Not used with sqlite3.
DATABASE_PORT = '' # Set to empty string for default. Not used with sqlite3.
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = ''
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = '#### CHANGE_ME ####'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.load_template_source',
'django.template.loaders.app_directories.load_template_source',
# 'django.template.loaders.eggs.load_template_source',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
# 'django.middleware.transaction.TransactionMiddleware',
)
ROOT_URLCONF = 'urlweb.urls'
TEMPLATE_DIRS = (
os.path.join(PROJECT_PATH, 'templates')
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.admin',
'urlweb.shortener',
)
STATIC_DOC_ROOT = os.path.join(PROJECT_PATH, 'static')
LOGIN_REDIRECT_URL = '/'
#TEMPLATE_CONTEXT_PROCESSORS += (
# 'django.core.context_processors.request',
# )
SITE_NAME = 'localhost:8000'
SITE_BASE_URL = 'http://' + SITE_NAME + '/'
REQUIRE_LOGIN = True
| mit | 4,012,834,744,008,253,000 | 31.330097 | 108 | 0.702102 | false |
apache/incubator-airflow | airflow/providers/postgres/operators/postgres.py | 7 | 2837 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Iterable, Mapping, Optional, Union
from airflow.models import BaseOperator
from airflow.providers.postgres.hooks.postgres import PostgresHook
from airflow.utils.decorators import apply_defaults
class PostgresOperator(BaseOperator):
"""
Executes sql code in a specific Postgres database
:param sql: the sql code to be executed. (templated)
:type sql: Can receive a str representing a sql statement,
a list of str (sql statements), or reference to a template file.
Template reference are recognized by str ending in '.sql'
:param postgres_conn_id: reference to a specific postgres database
:type postgres_conn_id: str
:param autocommit: if True, each command is automatically committed.
(default value: False)
:type autocommit: bool
:param parameters: (optional) the parameters to render the SQL query with.
:type parameters: dict or iterable
:param database: name of database which overwrite defined one in connection
:type database: str
"""
template_fields = ('sql',)
template_fields_renderers = {'sql': 'sql'}
template_ext = ('.sql',)
ui_color = '#ededed'
@apply_defaults
def __init__(
self,
*,
sql: str,
postgres_conn_id: str = 'postgres_default',
autocommit: bool = False,
parameters: Optional[Union[Mapping, Iterable]] = None,
database: Optional[str] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.sql = sql
self.postgres_conn_id = postgres_conn_id
self.autocommit = autocommit
self.parameters = parameters
self.database = database
self.hook = None
def execute(self, context):
self.log.info('Executing: %s', self.sql)
self.hook = PostgresHook(postgres_conn_id=self.postgres_conn_id, schema=self.database)
self.hook.run(self.sql, self.autocommit, parameters=self.parameters)
for output in self.hook.conn.notices:
self.log.info(output)
| apache-2.0 | -6,200,059,722,519,025,000 | 37.863014 | 94 | 0.692633 | false |
myarjunar/inasafe | safe/gui/gui_utilities.py | 1 | 1646 | # coding=utf-8
"""GUI utilities for the dock and the multi Exposure Tool."""
from PyQt4.QtCore import Qt
from qgis.core import QgsMapLayerRegistry
def layer_from_combo(combo):
"""Get the QgsMapLayer currently selected in a combo.
    Obtain the QgsMapLayer id from the UserRole data of the combo and return it
    as a QgsMapLayer.
    :returns: The map layer currently selected in the combo.
:rtype: QgsMapLayer
"""
index = combo.currentIndex()
if index < 0:
return None
layer_id = combo.itemData(index, Qt.UserRole)
layer = QgsMapLayerRegistry.instance().mapLayer(layer_id)
return layer
def add_ordered_combo_item(combo, text, data=None):
"""Add a combo item ensuring that all items are listed alphabetically.
    Although QComboBox allows you to set an InsertAlphabetically enum,
    this only has an effect when a user interactively adds combo items to
    an editable combo. Thus we have this little function to ensure that
combos are always sorted alphabetically.
:param combo: Combo box receiving the new item.
:type combo: QComboBox
:param text: Display text for the combo.
:type text: str
:param data: Optional UserRole data to be associated with the item.
:type data: QVariant, str
"""
size = combo.count()
for combo_index in range(0, size):
item_text = combo.itemText(combo_index)
# see if text alphabetically precedes item_text
if cmp(text.lower(), item_text.lower()) < 0:
combo.insertItem(combo_index, text, data)
return
# otherwise just add it to the end
combo.insertItem(size, text, data)
| gpl-3.0 | -9,182,464,517,506,760,000 | 30.056604 | 77 | 0.690158 | false |
mrquim/mrquimrepo | repo/plugin.video.neptune-1.2.2/resources/lib/modules/trailer.py | 5 | 4060 | # -*- coding: utf-8 -*-
'''
Neptune Rising Add-on
Copyright (C) 2016 Mr. blamo
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,json,urlparse,base64,random
from resources.lib.modules import client
from resources.lib.modules import control
class trailer:
def __init__(self):
self.base_link = 'http://www.youtube.com'
self.key_link = random.choice(['QUl6YVN5RDd2aFpDLTYta2habTVuYlVyLTZ0Q0JRQnZWcnFkeHNz', 'QUl6YVN5Q2RiNEFNenZpVG0yaHJhSFY3MXo2Nl9HNXBhM2ZvVXd3'])
self.key_link = '&key=%s' % base64.urlsafe_b64decode(self.key_link)
self.search_link = 'https://www.googleapis.com/youtube/v3/search?part=snippet&type=video&maxResults=5&q=%s'
self.youtube_search = 'https://www.googleapis.com/youtube/v3/search?q='
self.youtube_watch = 'http://www.youtube.com/watch?v=%s'
def play(self, name, url=None):
try:
url = self.worker(name, url)
if url == None: return
title = control.infoLabel('listitem.title')
if title == '': title = control.infoLabel('listitem.label')
icon = control.infoLabel('listitem.icon')
item = control.item(path=url, iconImage=icon, thumbnailImage=icon)
try: item.setArt({'icon': icon})
except: pass
item.setInfo(type='Video', infoLabels = {'title': title})
control.player.play(url, item)
except:
pass
def worker(self, name, url):
try:
if url.startswith(self.base_link):
url = self.resolve(url)
if url == None: raise Exception()
return url
elif not url.startswith('http://'):
url = self.youtube_watch % url
url = self.resolve(url)
if url == None: raise Exception()
return url
else:
raise Exception()
except:
query = name + ' trailer'
query = self.youtube_search + query
url = self.search(query)
if url == None: return
return url
def search(self, url):
try:
query = urlparse.parse_qs(urlparse.urlparse(url).query)['q'][0]
apiLang = control.apiLanguage()['youtube']
if apiLang != 'en':
url += "&relevanceLanguage=%s" % apiLang
url = self.search_link % urllib.quote_plus(query) + self.key_link
result = client.request(url)
items = json.loads(result)['items']
items = [(i['id']['videoId']) for i in items]
for url in items:
url = self.resolve(url)
if not url is None: return url
except:
return
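    # Hedged usage sketch (not part of the original add-on): how this class is
    # typically driven from a plugin route; the title and video id are made up.
    #
    #   trailer().play('Blade Runner 2049 (2017)', url='dQw4w9WgXcQ')
    #
    # play() hands the name/url to worker(), which tries resolve() on a given
    # YouTube id or URL and falls back to a Data API search for
    # "<name> trailer" when nothing usable was supplied or resolution fails.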
def resolve(self, url):
try:
id = url.split('?v=')[-1].split('/')[-1].split('?')[0].split('&')[0]
result = client.request('http://www.youtube.com/watch?v=%s' % id)
message = client.parseDOM(result, 'div', attrs = {'id': 'unavailable-submessage'})
message = ''.join(message)
alert = client.parseDOM(result, 'div', attrs = {'id': 'watch7-notification-area'})
if len(alert) > 0: raise Exception()
if re.search('[a-zA-Z]', message): raise Exception()
url = 'plugin://plugin.video.youtube/play/?video_id=%s' % id
return url
except:
return
| gpl-2.0 | 5,560,824,499,457,464,000 | 34 | 151 | 0.578325 | false |
navcoindev/navcoin-core | share/rpcuser/rpcuser.py | 1 | 1110 | #!/usr/bin/env python2
# Copyright (c) 2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import hashlib
import sys
import os
from random import SystemRandom
import base64
import hmac
if len(sys.argv) < 2:
sys.stderr.write('Please include username as an argument.\n')
sys.exit(0)
username = sys.argv[1]
#This uses os.urandom() underneath
cryptogen = SystemRandom()
#Create 16 byte hex salt
salt_sequence = [cryptogen.randrange(256) for i in range(16)]
hexseq = list(map(hex, salt_sequence))
salt = "".join([x[2:] for x in hexseq])
#Create 32 byte b64 password
password = base64.urlsafe_b64encode(os.urandom(32))
digestmod = hashlib.sha256
if sys.version_info.major >= 3:
password = password.decode('utf-8')
digestmod = 'SHA256'
m = hmac.new(bytearray(salt, 'utf-8'), bytearray(password, 'utf-8'), digestmod)
result = m.hexdigest()
print("String to be appended to navcoin.conf:")
print("rpcauth="+username+":"+salt+"$"+result)
print("Your password:\n"+password)
| mit | -5,064,398,183,000,619,000 | 26.073171 | 79 | 0.723423 | false |
markflyhigh/incubator-beam | sdks/python/apache_beam/io/gcp/datastore/v1/datastoreio_test.py | 2 | 12245 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import unittest
from builtins import map
from builtins import range
from builtins import zip
from mock import MagicMock
from mock import call
from mock import patch
# Protect against environments where datastore library is not available.
# pylint: disable=wrong-import-order, wrong-import-position, ungrouped-imports
try:
from google.cloud.proto.datastore.v1 import datastore_pb2
from apache_beam.io.gcp.datastore.v1 import fake_datastore
from apache_beam.io.gcp.datastore.v1 import helper
from google.cloud.proto.datastore.v1 import query_pb2
from apache_beam.io.gcp.datastore.v1 import query_splitter
from apache_beam.io.gcp.datastore.v1 import util
from apache_beam.io.gcp.datastore.v1.datastoreio import ReadFromDatastore
from apache_beam.io.gcp.datastore.v1.datastoreio import WriteToDatastore
from apache_beam.io.gcp.datastore.v1.datastoreio import _Mutate
from google.protobuf import timestamp_pb2
from googledatastore import helper as datastore_helper
except (ImportError, TypeError):
datastore_pb2 = None
# pylint: enable=wrong-import-order, wrong-import-position, ungrouped-imports
class DatastoreioTest(unittest.TestCase):
_PROJECT = 'project'
_KIND = 'kind'
_NAMESPACE = 'namespace'
@unittest.skipIf(sys.version_info[0] == 3,
'v1/datastoreio does not support Python 3 TODO: BEAM-4543')
@unittest.skipIf(datastore_pb2 is None, 'GCP dependencies are not installed')
def setUp(self):
self._mock_datastore = MagicMock()
self._query = query_pb2.Query()
self._query.kind.add().name = self._KIND
self._WRITE_BATCH_INITIAL_SIZE = util.WRITE_BATCH_INITIAL_SIZE
def get_timestamp(self):
return timestamp_pb2.Timestamp(seconds=1234)
def test_get_estimated_size_bytes_without_namespace(self):
entity_bytes = 100
timestamp = self.get_timestamp()
self.check_estimated_size_bytes(entity_bytes, timestamp)
def test_get_estimated_size_bytes_with_namespace(self):
entity_bytes = 100
timestamp = self.get_timestamp()
self.check_estimated_size_bytes(entity_bytes, timestamp, self._NAMESPACE)
def test_SplitQueryFn_with_num_splits(self):
with patch.object(helper, 'get_datastore',
return_value=self._mock_datastore):
num_splits = 23
def fake_get_splits(datastore, query, num_splits, partition=None):
return self.split_query(query, num_splits)
with patch.object(query_splitter, 'get_splits',
side_effect=fake_get_splits):
split_query_fn = ReadFromDatastore.SplitQueryFn(
self._PROJECT, self._query, None, num_splits)
split_query_fn.start_bundle()
returned_split_queries = []
for split_query in split_query_fn.process(self._query):
returned_split_queries.append(split_query)
self.assertEqual(len(returned_split_queries), num_splits)
self.assertEqual(0, len(self._mock_datastore.run_query.call_args_list))
self.verify_unique_keys(returned_split_queries)
def test_SplitQueryFn_without_num_splits(self):
with patch.object(helper, 'get_datastore',
return_value=self._mock_datastore):
# Force SplitQueryFn to compute the number of query splits
num_splits = 0
expected_num_splits = 23
entity_bytes = (expected_num_splits *
ReadFromDatastore._DEFAULT_BUNDLE_SIZE_BYTES)
with patch.object(ReadFromDatastore, 'get_estimated_size_bytes',
return_value=entity_bytes):
def fake_get_splits(datastore, query, num_splits, partition=None):
return self.split_query(query, num_splits)
with patch.object(query_splitter, 'get_splits',
side_effect=fake_get_splits):
split_query_fn = ReadFromDatastore.SplitQueryFn(
self._PROJECT, self._query, None, num_splits)
split_query_fn.start_bundle()
returned_split_queries = []
for split_query in split_query_fn.process(self._query):
returned_split_queries.append(split_query)
self.assertEqual(len(returned_split_queries), expected_num_splits)
self.assertEqual(0,
len(self._mock_datastore.run_query.call_args_list))
self.verify_unique_keys(returned_split_queries)
def test_SplitQueryFn_with_query_limit(self):
"""A test that verifies no split is performed when the query has a limit."""
with patch.object(helper, 'get_datastore',
return_value=self._mock_datastore):
self._query.limit.value = 3
split_query_fn = ReadFromDatastore.SplitQueryFn(
self._PROJECT, self._query, None, 4)
split_query_fn.start_bundle()
returned_split_queries = []
for split_query in split_query_fn.process(self._query):
returned_split_queries.append(split_query)
self.assertEqual(1, len(returned_split_queries))
self.assertEqual(0, len(self._mock_datastore.method_calls))
def test_SplitQueryFn_with_exception(self):
"""A test that verifies that no split is performed when failures occur."""
with patch.object(helper, 'get_datastore',
return_value=self._mock_datastore):
# Force SplitQueryFn to compute the number of query splits
num_splits = 0
expected_num_splits = 1
entity_bytes = (expected_num_splits *
ReadFromDatastore._DEFAULT_BUNDLE_SIZE_BYTES)
with patch.object(ReadFromDatastore, 'get_estimated_size_bytes',
return_value=entity_bytes):
with patch.object(query_splitter, 'get_splits',
side_effect=ValueError("Testing query split error")):
split_query_fn = ReadFromDatastore.SplitQueryFn(
self._PROJECT, self._query, None, num_splits)
split_query_fn.start_bundle()
returned_split_queries = []
for split_query in split_query_fn.process(self._query):
returned_split_queries.append(split_query)
self.assertEqual(len(returned_split_queries), expected_num_splits)
self.assertEqual(returned_split_queries[0][1], self._query)
self.assertEqual(0,
len(self._mock_datastore.run_query.call_args_list))
self.verify_unique_keys(returned_split_queries)
def test_DatastoreWriteFn_with_empty_batch(self):
self.check_DatastoreWriteFn(0)
def test_DatastoreWriteFn_with_one_batch(self):
num_entities_to_write = self._WRITE_BATCH_INITIAL_SIZE * 1 - 50
self.check_DatastoreWriteFn(num_entities_to_write)
def test_DatastoreWriteFn_with_multiple_batches(self):
num_entities_to_write = self._WRITE_BATCH_INITIAL_SIZE * 3 + 50
self.check_DatastoreWriteFn(num_entities_to_write)
def test_DatastoreWriteFn_with_batch_size_exact_multiple(self):
num_entities_to_write = self._WRITE_BATCH_INITIAL_SIZE * 2
self.check_DatastoreWriteFn(num_entities_to_write)
def test_DatastoreWriteFn_with_dynamic_batch_sizes(self):
num_entities_to_write = self._WRITE_BATCH_INITIAL_SIZE * 3 + 50
self.check_DatastoreWriteFn(num_entities_to_write,
use_fixed_batch_size=False)
def check_DatastoreWriteFn(self, num_entities, use_fixed_batch_size=True):
"""A helper function to test DatastoreWriteFn."""
with patch.object(helper, 'get_datastore',
return_value=self._mock_datastore):
entities = [e.entity for e in
fake_datastore.create_entities(num_entities)]
expected_mutations = list(map(WriteToDatastore.to_upsert_mutation,
entities))
actual_mutations = []
self._mock_datastore.commit.side_effect = (
fake_datastore.create_commit(actual_mutations))
fixed_batch_size = None
if use_fixed_batch_size:
fixed_batch_size = self._WRITE_BATCH_INITIAL_SIZE
datastore_write_fn = _Mutate.DatastoreWriteFn(
self._PROJECT, fixed_batch_size=fixed_batch_size)
datastore_write_fn.start_bundle()
for mutation in expected_mutations:
datastore_write_fn.process(mutation)
datastore_write_fn.finish_bundle()
self.assertEqual(actual_mutations, expected_mutations)
if use_fixed_batch_size:
self.assertEqual(
(num_entities - 1) // self._WRITE_BATCH_INITIAL_SIZE + 1,
self._mock_datastore.commit.call_count)
else:
self._mock_datastore.commit.assert_called()
def test_DatastoreWriteLargeEntities(self):
"""100*100kB entities gets split over two Commit RPCs."""
with patch.object(helper, 'get_datastore',
return_value=self._mock_datastore):
entities = [e.entity for e in fake_datastore.create_entities(100)]
datastore_write_fn = _Mutate.DatastoreWriteFn(
self._PROJECT, fixed_batch_size=self._WRITE_BATCH_INITIAL_SIZE)
datastore_write_fn.start_bundle()
for entity in entities:
datastore_helper.add_properties(
entity, {'large': u'A' * 100000}, exclude_from_indexes=True)
datastore_write_fn.process(WriteToDatastore.to_upsert_mutation(entity))
datastore_write_fn.finish_bundle()
self.assertEqual(2, self._mock_datastore.commit.call_count)
def verify_unique_keys(self, queries):
"""A helper function that verifies if all the queries have unique keys."""
keys, _ = zip(*queries)
keys = set(keys)
self.assertEqual(len(keys), len(queries))
def check_estimated_size_bytes(self, entity_bytes, timestamp, namespace=None):
"""A helper method to test get_estimated_size_bytes"""
timestamp_req = helper.make_request(
self._PROJECT, namespace, helper.make_latest_timestamp_query(namespace))
timestamp_resp = self.make_stats_response(
{'timestamp': datastore_helper.from_timestamp(timestamp)})
kind_stat_req = helper.make_request(
self._PROJECT, namespace, helper.make_kind_stats_query(
namespace, self._query.kind[0].name,
datastore_helper.micros_from_timestamp(timestamp)))
kind_stat_resp = self.make_stats_response(
{'entity_bytes': entity_bytes})
def fake_run_query(req):
if req == timestamp_req:
return timestamp_resp
elif req == kind_stat_req:
return kind_stat_resp
else:
print(kind_stat_req)
raise ValueError("Unknown req: %s" % req)
self._mock_datastore.run_query.side_effect = fake_run_query
self.assertEqual(entity_bytes, ReadFromDatastore.get_estimated_size_bytes(
self._PROJECT, namespace, self._query, self._mock_datastore))
self.assertEqual(self._mock_datastore.run_query.call_args_list,
[call(timestamp_req), call(kind_stat_req)])
def make_stats_response(self, property_map):
resp = datastore_pb2.RunQueryResponse()
entity_result = resp.batch.entity_results.add()
datastore_helper.add_properties(entity_result.entity, property_map)
return resp
def split_query(self, query, num_splits):
"""Generate dummy query splits."""
split_queries = []
for _ in range(0, num_splits):
q = query_pb2.Query()
q.CopyFrom(query)
split_queries.append(q)
return split_queries
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -4,795,461,419,426,154,000 | 40.64966 | 80 | 0.679134 | false |
saintdragon2/python-3-lecture-2015 | civil-final/2nd_presentation/11조/pygame/tests/freetype_test.py | 4 | 58221 | import os
if os.environ.get('SDL_VIDEODRIVER') == 'dummy':
__tags__ = ('ignore', 'subprocess_ignore')
import sys
import ctypes
import weakref
import gc
if __name__ == '__main__':
pkg_dir = os.path.split(os.path.abspath(__file__))[0]
parent_dir, pkg_name = os.path.split(pkg_dir)
is_pygame_pkg = (pkg_name == 'tests' and
os.path.split(parent_dir)[1] == 'pygame')
if not is_pygame_pkg:
sys.path.insert(0, parent_dir)
else:
is_pygame_pkg = __name__.startswith('pygame.tests.')
if is_pygame_pkg:
from pygame.tests.test_utils import test_not_implemented, unittest, \
geterror, arrinter
else:
from test.test_utils import test_not_implemented, unittest, geterror, \
arrinter
import pygame
try:
import pygame.freetype as ft
except ImportError:
ft = None
from pygame.compat import as_unicode, bytes_, unichr_, unicode_
FONTDIR = os.path.join(os.path.dirname (os.path.abspath (__file__)),
'fixtures', 'fonts')
def nullfont():
"""return an uninitialized font instance"""
return ft.Font.__new__(ft.Font)
max_point_size_FX6 = 0x7FFFFFFF
max_point_size = max_point_size_FX6 >> 6
max_point_size_f = max_point_size_FX6 * 0.015625
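# The limits above mirror FreeType's 26.6 fixed-point format: the low 6 bits
# hold the fraction, so ">> 6" gives the largest whole point size and
# "* 0.015625" (1/64) the largest fractional point size.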
def surf_same_image(a, b):
"""Return True if a's pixel buffer is identical to b's"""
a_sz = a.get_height() * a.get_pitch()
b_sz = b.get_height() * b.get_pitch()
if a_sz != b_sz:
return False
a_bytes = ctypes.string_at(a._pixels_address, a_sz)
b_bytes = ctypes.string_at(b._pixels_address, b_sz)
return a_bytes == b_bytes
class FreeTypeFontTest(unittest.TestCase):
_fixed_path = os.path.join(FONTDIR, 'test_fixed.otf')
_sans_path = os.path.join(FONTDIR, 'test_sans.ttf')
_mono_path = os.path.join(FONTDIR, 'PyGameMono.otf')
_bmp_8_75dpi_path = os.path.join(FONTDIR, 'PyGameMono-8.bdf')
_bmp_18_75dpi_path = os.path.join(FONTDIR, 'PyGameMono-18-75dpi.bdf')
_bmp_18_100dpi_path = os.path.join(FONTDIR, 'PyGameMono-18-100dpi.bdf')
_TEST_FONTS = {}
def setUp(self):
ft.init()
if 'fixed' not in self._TEST_FONTS:
# Inconsolata is an open-source font designed by Raph Levien
# Licensed under the Open Font License
# http://www.levien.com/type/myfonts/inconsolata.html
self._TEST_FONTS['fixed'] = ft.Font(self._fixed_path)
if 'sans' not in self._TEST_FONTS:
# Liberation Sans is an open-source font designed by Steve Matteson
# Licensed under the GNU GPL
# https://fedorahosted.org/liberation-fonts/
self._TEST_FONTS['sans'] = ft.Font(self._sans_path)
if 'mono' not in self._TEST_FONTS:
# A scalable mono test font made for Pygame. It contains only
# a few glyphs: '\0', 'A', 'B', 'C', and U+13079.
# It also contains two bitmap sizes: 8.0 X 8.0 and 19.0 X 19.0.
self._TEST_FONTS['mono'] = ft.Font(self._mono_path)
if 'bmp-8-75dpi' not in self._TEST_FONTS:
# A fixed size bitmap mono test font made for Pygame.
# It contains only a few glyphs: '\0', 'A', 'B', 'C', and U+13079.
# The size is 8.0 X 8.0.
self._TEST_FONTS['bmp-8-75dpi'] = ft.Font(self._bmp_8_75dpi_path)
if 'bmp-18-75dpi' not in self._TEST_FONTS:
# A fixed size bitmap mono test font made for Pygame.
# It contains only a few glyphs: '\0', 'A', 'B', 'C', and U+13079.
# The size is 18 pt, which is 19.0 X 19.0 pixels at 75 dpi.
self._TEST_FONTS['bmp-18-75dpi'] = ft.Font(self._bmp_18_75dpi_path)
if 'bmp-18-100dpi' not in self._TEST_FONTS:
# A fixed size bitmap mono test font made for Pygame.
# It contains only a few glyphs: '\0', 'A', 'B', 'C', and U+13079.
# The size is 18 pt, which is 25.0 X 25.0 pixels at 100 dpi.
self._TEST_FONTS['bmp-18-100dpi'] = (
ft.Font(self._bmp_18_100dpi_path))
def test_freetype_defaultfont(self):
font = ft.Font(None)
self.assertEqual(font.name, "FreeSans")
def test_freetype_Font_init(self):
self.assertRaises(IOError, ft.Font, os.path.join (FONTDIR, 'nonexistant.ttf'))
f = self._TEST_FONTS['sans']
self.assertTrue(isinstance(f, ft.Font))
f = self._TEST_FONTS['fixed']
self.assertTrue(isinstance(f, ft.Font))
# Test keyword arguments
f = ft.Font(size=22, file=None)
self.assertEqual(f.size, 22)
f = ft.Font(font_index=0, file=None)
self.assertNotEqual(ft.get_default_resolution(), 100)
f = ft.Font(resolution=100, file=None)
self.assertEqual(f.resolution, 100)
f = ft.Font(ucs4=True, file=None)
self.assertTrue(f.ucs4)
self.assertRaises(OverflowError, ft.Font, file=None,
size=(max_point_size + 1))
self.assertRaises(OverflowError, ft.Font, file=None, size=-1)
f = ft.Font(None, size=24)
self.assert_(f.height > 0)
self.assertRaises(IOError, f.__init__,
os.path.join(FONTDIR, 'nonexistant.ttf'))
self.assertRaises(RuntimeError, f.get_rect, 'a', size=24)
# Test attribute preservation during reinitialization
f = ft.Font(self._sans_path, size=24, ucs4=True)
self.assertEqual(f.name, 'Liberation Sans')
self.assertTrue(f.scalable)
self.assertFalse(f.fixed_width)
self.assertTrue(f.antialiased)
self.assertFalse(f.oblique)
self.assertTrue(f.ucs4)
f.antialiased = False
f.oblique = True
f.__init__(self._mono_path)
self.assertEqual(f.name, 'PyGameMono')
self.assertTrue(f.scalable)
self.assertTrue(f.fixed_width)
self.assertFalse(f.antialiased)
self.assertTrue(f.oblique)
self.assertTrue(f.ucs4)
# For a bitmap font, the size is automatically set to the first
# size in the available sizes list.
f = ft.Font(self._bmp_8_75dpi_path)
sizes = f.get_sizes()
self.assertEqual(len(sizes), 1)
size_pt, width_px, height_px, x_ppem, y_ppem = sizes[0]
self.assertEqual(f.size, (x_ppem, y_ppem))
f.__init__(self._bmp_8_75dpi_path, size=12)
self.assertEqual(f.size, 12.0)
def test_freetype_Font_scalable(self):
f = self._TEST_FONTS['sans']
self.assertTrue(f.scalable)
self.assertRaises(RuntimeError, lambda : nullfont().scalable)
def test_freetype_Font_fixed_width(self):
f = self._TEST_FONTS['sans']
self.assertFalse(f.fixed_width)
f = self._TEST_FONTS['mono']
self.assertTrue(f.fixed_width)
self.assertRaises(RuntimeError, lambda : nullfont().fixed_width)
def test_freetype_Font_fixed_sizes(self):
f = self._TEST_FONTS['sans']
self.assertEqual(f.fixed_sizes, 0)
f = self._TEST_FONTS['bmp-8-75dpi']
self.assertEqual(f.fixed_sizes, 1)
f = self._TEST_FONTS['mono']
self.assertEqual(f.fixed_sizes, 2)
def test_freetype_Font_get_sizes(self):
f = self._TEST_FONTS['sans']
szlist = f.get_sizes()
self.assertTrue(isinstance(szlist, list))
self.assertEqual(len(szlist), 0)
f = self._TEST_FONTS['bmp-8-75dpi']
szlist = f.get_sizes()
self.assertTrue(isinstance(szlist, list))
self.assertEqual(len(szlist), 1)
size8 = szlist[0]
self.assertTrue(isinstance(size8[0], int))
self.assertEqual(size8[0], 8)
self.assertTrue(isinstance(size8[1], int))
self.assertTrue(isinstance(size8[2], int))
self.assertTrue(isinstance(size8[3], float))
self.assertEqual(int(size8[3] * 64.0 + 0.5), 8 * 64)
self.assertTrue(isinstance(size8[4], float))
self.assertEqual(int(size8[4] * 64.0 + 0.5), 8 * 64)
f = self._TEST_FONTS['mono']
szlist = f.get_sizes()
self.assertTrue(isinstance(szlist, list))
self.assertEqual(len(szlist), 2)
size8 = szlist[0]
self.assertEqual(size8[3], 8)
self.assertEqual(int(size8[3] * 64.0 + 0.5), 8 * 64)
self.assertEqual(int(size8[4] * 64.0 + 0.5), 8 * 64)
size19 = szlist[1]
self.assertEqual(size19[3], 19)
self.assertEqual(int(size19[3] * 64.0 + 0.5), 19 * 64)
self.assertEqual(int(size19[4] * 64.0 + 0.5), 19 * 64)
def test_freetype_Font_use_bitmap_strikes(self):
f = self._TEST_FONTS['mono']
try:
# use_bitmap_strikes == True
#
self.assertTrue(f.use_bitmap_strikes)
# bitmap compatible properties
s_strike, sz = f.render_raw('A', size=19)
try:
f.vertical = True
s_strike_vert, sz = f.render_raw('A', size=19)
finally:
f.vertical = False
try:
f.wide = True
s_strike_wide, sz = f.render_raw('A', size=19)
finally:
f.wide = False
try:
f.underline = True
s_strike_underline, sz = f.render_raw('A', size=19)
finally:
f.underline = False
# bitmap incompatible properties
s_strike_rot45, sz = f.render_raw('A', size=19, rotation=45)
try:
f.strong = True
s_strike_strong, sz = f.render_raw('A', size=19)
finally:
f.strong = False
try:
f.oblique = True
s_strike_oblique, sz = f.render_raw('A', size=19)
finally:
f.oblique = False
# compare with use_bitmap_strikes == False
#
f.use_bitmap_strikes = False
self.assertFalse(f.use_bitmap_strikes)
# bitmap compatible properties
s_outline, sz = f.render_raw('A', size=19)
self.assertNotEqual(s_outline, s_strike)
try:
f.vertical = True
s_outline, sz = f.render_raw('A', size=19)
self.assertNotEqual(s_outline, s_strike_vert)
finally:
f.vertical = False
try:
f.wide = True
s_outline, sz = f.render_raw('A', size=19)
self.assertNotEqual(s_outline, s_strike_wide)
finally:
f.wide = False
try:
f.underline = True
s_outline, sz = f.render_raw('A', size=19)
self.assertNotEqual(s_outline, s_strike_underline)
finally:
f.underline = False
# bitmap incompatible properties
s_outline, sz = f.render_raw('A', size=19, rotation=45)
self.assertEqual(s_outline, s_strike_rot45)
try:
f.strong = True
s_outline, sz = f.render_raw('A', size=19)
self.assertEqual(s_outline, s_strike_strong)
finally:
f.strong = False
try:
f.oblique = True
s_outline, sz = f.render_raw('A', size=19)
self.assertEqual(s_outline, s_strike_oblique)
finally:
f.oblique = False
finally:
f.use_bitmap_strikes = True
def test_freetype_Font_bitmap_files(self):
"""Ensure bitmap file restrictions are caught"""
f = self._TEST_FONTS['bmp-8-75dpi']
f_null = nullfont()
s = pygame.Surface((10, 10), 0, 32)
a = s.get_view('3')
exception = AttributeError
self.assertRaises(exception, setattr, f, 'strong', True)
self.assertRaises(exception, setattr, f, 'oblique', True)
self.assertRaises(exception, setattr, f, 'style', ft.STYLE_STRONG)
self.assertRaises(exception, setattr, f, 'style', ft.STYLE_OBLIQUE)
exception = RuntimeError
self.assertRaises(exception, setattr, f_null, 'strong', True)
self.assertRaises(exception, setattr, f_null, 'oblique', True)
self.assertRaises(exception, setattr, f_null, 'style', ft.STYLE_STRONG)
self.assertRaises(exception, setattr, f_null, 'style', ft.STYLE_OBLIQUE)
exception = ValueError
self.assertRaises(exception, f.render,
'A', (0, 0, 0), size=8, rotation=1)
self.assertRaises(exception, f.render,
'A', (0, 0, 0), size=8, style=ft.STYLE_OBLIQUE)
self.assertRaises(exception, f.render,
'A', (0, 0, 0), size=8, style=ft.STYLE_STRONG)
self.assertRaises(exception, f.render_raw, 'A', size=8, rotation=1)
self.assertRaises(exception, f.render_raw,
'A', size=8, style=ft.STYLE_OBLIQUE)
self.assertRaises(exception, f.render_raw,
'A', size=8, style=ft.STYLE_STRONG)
self.assertRaises(exception, f.render_to,
s, (0, 0), 'A', (0, 0, 0), size=8, rotation=1)
self.assertRaises(exception, f.render_to,
s, (0, 0), 'A', (0, 0, 0), size=8,
style=ft.STYLE_OBLIQUE)
self.assertRaises(exception, f.render_to,
s, (0, 0), 'A', (0, 0, 0), size=8,
style=ft.STYLE_STRONG)
self.assertRaises(exception, f.render_raw_to,
a, 'A', size=8, rotation=1)
self.assertRaises(exception, f.render_raw_to,
a, 'A', size=8, style=ft.STYLE_OBLIQUE)
self.assertRaises(exception, f.render_raw_to,
a, 'A', size=8, style=ft.STYLE_STRONG)
self.assertRaises(exception, f.get_rect, 'A', size=8, rotation=1)
self.assertRaises(exception, f.get_rect,
'A', size=8, style=ft.STYLE_OBLIQUE)
self.assertRaises(exception, f.get_rect,
'A', size=8, style=ft.STYLE_STRONG)
# Unsupported point size
exception = pygame.error
self.assertRaises(exception, f.get_rect, 'A', size=42)
self.assertRaises(exception, f.get_metrics, 'A', size=42)
self.assertRaises(exception, f.get_sized_ascender, 42)
self.assertRaises(exception, f.get_sized_descender, 42)
self.assertRaises(exception, f.get_sized_height, 42)
self.assertRaises(exception, f.get_sized_glyph_height, 42)
def test_freetype_Font_get_metrics(self):
font = self._TEST_FONTS['sans']
metrics = font.get_metrics('ABCD', size=24)
self.assertEqual(len(metrics), len('ABCD'))
self.assertTrue(isinstance(metrics, list))
for metrics_tuple in metrics:
self.assertTrue(isinstance(metrics_tuple, tuple), metrics_tuple)
self.assertEqual(len(metrics_tuple), 6)
for m in metrics_tuple[:4]:
self.assertTrue(isinstance(m, int))
for m in metrics_tuple[4:]:
self.assertTrue(isinstance(m, float))
# test for empty string
metrics = font.get_metrics('', size=24)
self.assertEqual(metrics, [])
# test for invalid string
self.assertRaises(TypeError, font.get_metrics, 24, 24)
# raises exception when uninitialized
self.assertRaises(RuntimeError, nullfont().get_metrics,
'a', size=24)
def test_freetype_Font_get_rect(self):
font = self._TEST_FONTS['sans']
def test_rect(r):
self.assertTrue(isinstance(r, pygame.Rect))
rect_default = font.get_rect("ABCDabcd", size=24)
test_rect(rect_default)
self.assertTrue(rect_default.size > (0, 0))
self.assertTrue(rect_default.width > rect_default.height)
rect_bigger = font.get_rect("ABCDabcd", size=32)
test_rect(rect_bigger)
self.assertTrue(rect_bigger.size > rect_default.size)
rect_strong = font.get_rect("ABCDabcd", size=24, style=ft.STYLE_STRONG)
test_rect(rect_strong)
self.assertTrue(rect_strong.size > rect_default.size)
font.vertical = True
rect_vert = font.get_rect("ABCDabcd", size=24)
test_rect(rect_vert)
self.assertTrue(rect_vert.width < rect_vert.height)
font.vertical = False
rect_oblique = font.get_rect("ABCDabcd", size=24, style=ft.STYLE_OBLIQUE)
test_rect(rect_oblique)
self.assertTrue(rect_oblique.width > rect_default.width)
self.assertTrue(rect_oblique.height == rect_default.height)
rect_under = font.get_rect("ABCDabcd", size=24, style=ft.STYLE_UNDERLINE)
test_rect(rect_under)
self.assertTrue(rect_under.width == rect_default.width)
self.assertTrue(rect_under.height > rect_default.height)
# Rect size should change if UTF surrogate pairs are treated as
# one code point or two.
ufont = self._TEST_FONTS['mono']
rect_utf32 = ufont.get_rect(as_unicode(r'\U00013079'), size=24)
rect_utf16 = ufont.get_rect(as_unicode(r'\uD80C\uDC79'), size=24)
self.assertEqual(rect_utf16, rect_utf32)
ufont.ucs4 = True
try:
rect_utf16 = ufont.get_rect(as_unicode(r'\uD80C\uDC79'), size=24)
finally:
ufont.ucs4 = False
self.assertNotEqual(rect_utf16, rect_utf32)
self.assertRaises(RuntimeError,
nullfont().get_rect, 'a', size=24)
# text stretching
rect12 = font.get_rect('A', size=12.0)
rect24 = font.get_rect('A', size=24.0)
rect_x = font.get_rect('A', size=(24.0, 12.0))
self.assertEqual(rect_x.width, rect24.width)
self.assertEqual(rect_x.height, rect12.height)
rect_y = font.get_rect('A', size=(12.0, 24.0))
self.assertEqual(rect_y.width, rect12.width)
self.assertEqual(rect_y.height, rect24.height)
def test_freetype_Font_height(self):
f = self._TEST_FONTS['sans']
self.assertEqual(f.height, 2355)
f = self._TEST_FONTS['fixed']
self.assertEqual(f.height, 1100)
self.assertRaises(RuntimeError, lambda : nullfont().height)
def test_freetype_Font_name(self):
f = self._TEST_FONTS['sans']
self.assertEqual(f.name, 'Liberation Sans')
f = self._TEST_FONTS['fixed']
self.assertEqual(f.name, 'Inconsolata')
nf = nullfont()
self.assertEqual(nf.name, repr(nf))
def test_freetype_Font_size(self):
f = ft.Font(None, size=12)
self.assertEqual(f.size, 12)
f.size = 22
self.assertEqual(f.size, 22)
f.size = 0
self.assertEqual(f.size, 0)
f.size = max_point_size
self.assertEqual(f.size, max_point_size)
f.size = 6.5
self.assertEqual(f.size, 6.5)
f.size = max_point_size_f
self.assertEqual(f.size, max_point_size_f)
self.assertRaises(OverflowError, setattr, f, 'size', -1)
self.assertRaises(OverflowError, setattr, f, 'size',
(max_point_size + 1))
f.size = 24.0, 0
size = f.size
self.assertTrue(isinstance(size, float))
self.assertEqual(size, 24.0)
f.size = 16, 16
size = f.size
self.assertTrue(isinstance(size, tuple))
self.assertEqual(len(size), 2)
x, y = size
self.assertTrue(isinstance(x, float))
self.assertEqual(x, 16.0)
self.assertTrue(isinstance(y, float))
self.assertEqual(y, 16.0)
f.size = 20.5, 22.25
x, y = f.size
self.assertEqual(x, 20.5)
self.assertEqual(y, 22.25)
f.size = 0, 0
size = f.size
self.assertTrue(isinstance(size, float))
self.assertEqual(size, 0.0)
self.assertRaises(ValueError, setattr, f, 'size', (0, 24.0))
self.assertRaises(TypeError, setattr, f, 'size', (24.0,))
self.assertRaises(TypeError, setattr, f, 'size', (24.0, 0, 0))
self.assertRaises(TypeError, setattr, f, 'size', (24.0j, 24.0))
self.assertRaises(TypeError, setattr, f, 'size', (24.0, 24.0j))
self.assertRaises(OverflowError, setattr, f, 'size', (-1, 16))
self.assertRaises(OverflowError, setattr, f, 'size',
(max_point_size + 1, 16))
self.assertRaises(OverflowError, setattr, f, 'size', (16, -1))
self.assertRaises(OverflowError, setattr, f, 'size',
(16, max_point_size + 1))
# bitmap files with identical point size but differing ppems.
f75 = self._TEST_FONTS['bmp-18-75dpi']
sizes = f75.get_sizes()
self.assertEqual(len(sizes), 1)
size_pt, width_px, height_px, x_ppem, y_ppem = sizes[0]
self.assertEqual(size_pt, 18)
self.assertEqual(x_ppem, 19.0)
self.assertEqual(y_ppem, 19.0)
rect = f75.get_rect('A', size=18)
rect = f75.get_rect('A', size=19)
rect = f75.get_rect('A', size=(19.0, 19.0))
self.assertRaises(pygame.error, f75.get_rect, 'A', size=17)
f100 = self._TEST_FONTS['bmp-18-100dpi']
sizes = f100.get_sizes()
self.assertEqual(len(sizes), 1)
size_pt, width_px, height_px, x_ppem, y_ppem = sizes[0]
self.assertEqual(size_pt, 18)
self.assertEqual(x_ppem, 25.0)
self.assertEqual(y_ppem, 25.0)
rect = f100.get_rect('A', size=18)
rect = f100.get_rect('A', size=25)
rect = f100.get_rect('A', size=(25.0, 25.0))
self.assertRaises(pygame.error, f100.get_rect, 'A', size=17)
def test_freetype_Font_rotation(self):
test_angles = [(30, 30),
(360, 0), (390, 30),
(720, 0), (764, 44),
(-30, 330),
(-360, 0), (-390, 330),
(-720, 0), (-764, 316)]
f = ft.Font(None)
self.assertEqual(f.rotation, 0)
for r, r_reduced in test_angles:
f.rotation = r
self.assertEqual(f.rotation, r_reduced,
"for angle %d: %d != %d" %
(r, f.rotation, r_reduced))
self.assertRaises(TypeError, setattr, f, 'rotation', '12')
def test_freetype_Font_render_to(self):
# Rendering to an existing target surface is equivalent to
# blitting a surface returned by Font.render with the target.
font = self._TEST_FONTS['sans']
surf = pygame.Surface((800, 600))
color = pygame.Color(0, 0, 0)
rrect = font.render_to(surf, (32, 32),
'FoobarBaz', color, None, size=24)
self.assertTrue(isinstance(rrect, pygame.Rect))
self.assertEqual(rrect.top, rrect.height)
## self.assertEqual(rrect.left, something or other)
rcopy = rrect.copy()
rcopy.topleft = (32, 32)
self.assertTrue(surf.get_rect().contains(rcopy))
rect = pygame.Rect(20, 20, 2, 2)
rrect = font.render_to(surf, rect, 'FoobarBax', color, None, size=24)
self.assertEqual(rrect.top, rrect.height)
self.assertNotEqual(rrect.size, rect.size)
rrect = font.render_to(surf, (20.1, 18.9), 'FoobarBax',
color, None, size=24)
## self.assertEqual(tuple(rend[1].topleft), (20, 18))
rrect = font.render_to(surf, rect, '', color, None, size=24)
self.assertFalse(rrect)
self.assertEqual(rrect.height, font.get_sized_height(24))
# invalid surf test
self.assertRaises(TypeError, font.render_to,
"not a surface", "text", color)
self.assertRaises(TypeError, font.render_to,
pygame.Surface, "text", color)
# invalid dest test
for dest in [None, 0, 'a', 'ab',
(), (1,), ('a', 2), (1, 'a'), (1+2j, 2), (1, 1+2j),
(1, int), (int, 1)]:
self.assertRaises(TypeError, font.render,
surf, dest, 'foobar', color, size=24)
# misc parameter test
self.assertRaises(ValueError, font.render_to, surf, (0, 0),
'foobar', color)
self.assertRaises(TypeError, font.render_to, surf, (0, 0),
'foobar', color, "", size=24)
self.assertRaises(ValueError, font.render_to, surf, (0, 0),
'foobar', color, None, style=42, size=24)
self.assertRaises(TypeError, font.render_to, surf, (0, 0),
'foobar', color, None, style=None, size=24)
self.assertRaises(ValueError, font.render_to, surf, (0, 0),
'foobar', color, None, style=97, size=24)
def test_freetype_Font_render(self):
font = self._TEST_FONTS['sans']
surf = pygame.Surface((800, 600))
color = pygame.Color(0, 0, 0)
rend = font.render('FoobarBaz', pygame.Color(0, 0, 0), None, size=24)
self.assertTrue(isinstance(rend, tuple))
self.assertEqual(len(rend), 2)
self.assertTrue(isinstance(rend[0], pygame.Surface))
self.assertTrue(isinstance(rend[1], pygame.Rect))
self.assertEqual(rend[0].get_rect().size, rend[1].size)
s, r = font.render('', pygame.Color(0, 0, 0), None, size=24)
self.assertEqual(r.width, 1)
self.assertEqual(r.height, font.get_sized_height(24))
self.assertEqual(s.get_size(), r.size)
self.assertEqual(s.get_bitsize(), 32)
# misc parameter test
self.assertRaises(ValueError, font.render, 'foobar', color)
self.assertRaises(TypeError, font.render, 'foobar', color, "",
size=24)
self.assertRaises(ValueError, font.render, 'foobar', color, None,
style=42, size=24)
self.assertRaises(TypeError, font.render, 'foobar', color, None,
style=None, size=24)
self.assertRaises(ValueError, font.render, 'foobar', color, None,
style=97, size=24)
# valid surrogate pairs
font2 = self._TEST_FONTS['mono']
ucs4 = font2.ucs4
try:
font2.ucs4 = False
rend1 = font2.render(as_unicode(r'\uD80C\uDC79'), color, size=24)
rend2 = font2.render(as_unicode(r'\U00013079'), color, size=24)
self.assertEqual(rend1[1], rend2[1])
font2.ucs4 = True
rend1 = font2.render(as_unicode(r'\uD80C\uDC79'), color, size=24)
self.assertNotEqual(rend1[1], rend2[1])
finally:
font2.ucs4 = ucs4
# malformed surrogate pairs
self.assertRaises(UnicodeEncodeError, font.render,
as_unicode(r'\uD80C'), color, size=24)
self.assertRaises(UnicodeEncodeError, font.render,
as_unicode(r'\uDCA7'), color, size=24)
self.assertRaises(UnicodeEncodeError, font.render,
as_unicode(r'\uD7FF\uDCA7'), color, size=24)
self.assertRaises(UnicodeEncodeError, font.render,
as_unicode(r'\uDC00\uDCA7'), color, size=24)
self.assertRaises(UnicodeEncodeError, font.render,
as_unicode(r'\uD80C\uDBFF'), color, size=24)
self.assertRaises(UnicodeEncodeError, font.render,
as_unicode(r'\uD80C\uE000'), color, size=24)
# raises exception when uninitialized
self.assertRaises(RuntimeError, nullfont().render,
'a', (0, 0, 0), size=24)
# Confirm the correct glpyhs are returned for a couple of
# unicode code points, 'A' and '\U00023079'. For each code point
# the rendered glyph is compared with an image of glyph bitmap
# as exported by FontForge.
path = os.path.join(FONTDIR, 'A_PyGameMono-8.png')
A = pygame.image.load(path)
path = os.path.join(FONTDIR, 'u13079_PyGameMono-8.png')
u13079 = pygame.image.load(path)
font = self._TEST_FONTS['mono']
font.ucs4 = False
A_rendered, r = font.render('A', bgcolor=pygame.Color('white'), size=8)
u13079_rendered, r = font.render(as_unicode(r'\U00013079'),
bgcolor=pygame.Color('white'), size=8)
## before comparing the surfaces, make sure they are the same
## pixel format. Use 32-bit SRCALPHA to avoid row padding and
## undefined bytes (the alpha byte will be set to 255.)
bitmap = pygame.Surface(A.get_size(), pygame.SRCALPHA, 32)
bitmap.blit(A, (0, 0))
rendering = pygame.Surface(A_rendered.get_size(), pygame.SRCALPHA, 32)
rendering.blit(A_rendered, (0, 0))
self.assertTrue(surf_same_image(rendering, bitmap))
bitmap = pygame.Surface(u13079.get_size(), pygame.SRCALPHA, 32)
bitmap.blit(u13079, (0, 0))
rendering = pygame.Surface(u13079_rendered.get_size(),
pygame.SRCALPHA, 32)
rendering.blit(u13079_rendered, (0, 0))
self.assertTrue(surf_same_image(rendering, bitmap))
def test_freetype_Font_render_mono(self):
font = self._TEST_FONTS['sans']
color = pygame.Color('black')
colorkey = pygame.Color('white')
text = "."
save_antialiased = font.antialiased
font.antialiased = False
try:
surf, r = font.render(text, color, size=24)
self.assertEqual(surf.get_bitsize(), 8)
flags = surf.get_flags()
self.assertTrue(flags & pygame.SRCCOLORKEY)
self.assertFalse(flags & (pygame.SRCALPHA | pygame.HWSURFACE))
self.assertEqual(surf.get_colorkey(), colorkey)
self.assertTrue(surf.get_alpha() is None)
translucent_color = pygame.Color(*color)
translucent_color.a = 55
surf, r = font.render(text, translucent_color, size=24)
self.assertEqual(surf.get_bitsize(), 8)
flags = surf.get_flags()
self.assertTrue(flags & (pygame.SRCCOLORKEY | pygame.SRCALPHA))
self.assertFalse(flags & pygame.HWSURFACE)
self.assertEqual(surf.get_colorkey(), colorkey)
self.assertEqual(surf.get_alpha(), translucent_color.a)
surf, r = font.render(text, color, colorkey, size=24)
self.assertEqual(surf.get_bitsize(), 32)
finally:
font.antialiased = save_antialiased
def test_freetype_Font_render_to_mono(self):
# Blitting is done in two stages. First the target is alpha filled
# with the background color, if any. Second, the foreground
# color is alpha blitted to the background.
font = self._TEST_FONTS['sans']
text = " ."
rect = font.get_rect(text, size=24)
size = rect.size
fg = pygame.Surface((1, 1), pygame.SRCALPHA, 32)
bg = pygame.Surface((1, 1), pygame.SRCALPHA, 32)
surrogate = pygame.Surface((1, 1), pygame.SRCALPHA, 32)
surfaces = [pygame.Surface(size, 0, 8),
pygame.Surface(size, 0, 16),
pygame.Surface(size, pygame.SRCALPHA, 16),
pygame.Surface(size, 0, 24),
pygame.Surface(size, 0, 32),
pygame.Surface(size, pygame.SRCALPHA, 32)]
fg_colors = [
surfaces[0].get_palette_at(2),
surfaces[1].unmap_rgb(surfaces[1].map_rgb((128, 64, 200))),
surfaces[2].unmap_rgb(surfaces[2].map_rgb((99, 0, 100, 64))),
(128, 97, 213),
(128, 97, 213),
(128, 97, 213, 60)]
fg_colors = [pygame.Color(*c) for c in fg_colors]
self.assertEqual(len(surfaces), len(fg_colors)) # integrity check
bg_colors = [
surfaces[0].get_palette_at(4),
surfaces[1].unmap_rgb(surfaces[1].map_rgb((220, 20, 99))),
surfaces[2].unmap_rgb(surfaces[2].map_rgb((55, 200, 0, 86))),
(255, 120, 13),
(255, 120, 13),
(255, 120, 13, 180)]
bg_colors = [pygame.Color(*c) for c in bg_colors]
self.assertEqual(len(surfaces), len(bg_colors)) # integrity check
save_antialiased = font.antialiased
font.antialiased = False
try:
fill_color = pygame.Color('black')
for i in range(len(surfaces)):
surf = surfaces[i]
surf.fill(fill_color)
fg_color = fg_colors[i]
fg.set_at((0, 0), fg_color)
surf.blit(fg, (0, 0))
r_fg_color = surf.get_at((0, 0))
surf.set_at((0, 0), fill_color)
rrect = font.render_to(surf, (0, 0), text, fg_color,
size=24)
bottomleft = 0, rrect.height - 1
self.assertEqual(surf.get_at(bottomleft), fill_color)
bottomright = rrect.width - 1, rrect.height - 1
self.assertEqual(surf.get_at(bottomright), r_fg_color)
for i in range(len(surfaces)):
surf = surfaces[i]
surf.fill(fill_color)
fg_color = fg_colors[i]
bg_color = bg_colors[i]
bg.set_at((0, 0), bg_color)
fg.set_at((0, 0), fg_color)
if surf.get_bitsize() == 24:
# For a 24 bit target surface test against Pygame's alpha
# blit as there appears to be a problem with SDL's alpha
# blit:
#
# self.assertEqual(surf.get_at(bottomright), r_fg_color)
#
# raises
#
# AssertionError: (128, 97, 213, 255) != (129, 98, 213, 255)
#
surrogate.set_at((0, 0), fill_color)
surrogate.blit(bg, (0, 0))
r_bg_color = surrogate.get_at((0, 0))
surrogate.blit(fg, (0, 0))
r_fg_color = surrogate.get_at((0, 0))
else:
# Surface blit values for comparison.
surf.blit(bg, (0, 0))
r_bg_color = surf.get_at((0, 0))
surf.blit(fg, (0, 0))
r_fg_color = surf.get_at((0, 0))
surf.set_at((0, 0), fill_color)
rrect = font.render_to(surf, (0, 0), text, fg_color,
bg_color, size=24)
bottomleft = 0, rrect.height - 1
self.assertEqual(surf.get_at(bottomleft), r_bg_color)
bottomright = rrect.width - 1, rrect.height - 1
self.assertEqual(surf.get_at(bottomright), r_fg_color)
finally:
font.antialiased = save_antialiased
def test_freetype_Font_render_raw(self):
font = self._TEST_FONTS['sans']
text = "abc"
size = font.get_rect(text, size=24).size
rend = font.render_raw(text, size=24)
self.assertTrue(isinstance(rend, tuple))
self.assertEqual(len(rend), 2)
r, s = rend
self.assertTrue(isinstance(r, bytes_))
self.assertTrue(isinstance(s, tuple))
self.assertTrue(len(s), 2)
w, h = s
self.assertTrue(isinstance(w, int))
self.assertTrue(isinstance(w, int))
self.assertEqual(s, size)
self.assertEqual(len(r), w * h)
r, (w, h) = font.render_raw('', size=24)
self.assertEqual(w, 0)
self.assertEqual(h, font.height)
self.assertEqual(len(r), 0)
# bug with descenders: this would crash
rend = font.render_raw('render_raw', size=24)
# bug with non-printable characters: this would cause a crash
# because the text length was not adjusted for skipped characters.
text = unicode_("").join([unichr_(i) for i in range(31, 64)])
rend = font.render_raw(text, size=10)
def test_freetype_Font_render_raw_to(self):
# This only checks that blits do not crash. It needs to check:
# - int values
# - invert option
#
font = self._TEST_FONTS['sans']
text = "abc"
# No frills antialiased render to int1 (__render_glyph_INT)
srect = font.get_rect(text, size=24)
surf = pygame.Surface(srect.size, 0, 8)
rrect = font.render_raw_to(surf.get_view('2'), text, size=24)
self.assertEqual(rrect, srect)
for bpp in [24, 32]:
surf = pygame.Surface(srect.size, 0, bpp)
rrect = font.render_raw_to(surf.get_view('r'), text, size=24)
self.assertEqual(rrect, srect)
# Underlining to int1 (__fill_glyph_INT)
srect = font.get_rect(text, size=24, style=ft.STYLE_UNDERLINE)
surf = pygame.Surface(srect.size, 0, 8)
rrect = font.render_raw_to(surf.get_view('2'), text, size=24,
style=ft.STYLE_UNDERLINE)
self.assertEqual(rrect, srect)
for bpp in [24, 32]:
surf = pygame.Surface(srect.size, 0, bpp)
rrect = font.render_raw_to(surf.get_view('r'), text, size=24,
style=ft.STYLE_UNDERLINE)
self.assertEqual(rrect, srect)
# Unaliased (mono) rendering to int1 (__render_glyph_MONO_as_INT)
font.antialiased = False
try:
srect = font.get_rect(text, size=24)
surf = pygame.Surface(srect.size, 0, 8)
rrect = font.render_raw_to(surf.get_view('2'), text, size=24)
self.assertEqual(rrect, srect)
for bpp in [24, 32]:
surf = pygame.Surface(srect.size, 0, bpp)
rrect = font.render_raw_to(surf.get_view('r'), text, size=24)
self.assertEqual(rrect, srect)
finally:
font.antialiased = True
# Antialiased render to ints sized greater than 1 byte
# (__render_glyph_INT)
srect = font.get_rect(text, size=24)
for bpp in [16, 24, 32]:
surf = pygame.Surface(srect.size, 0, bpp)
rrect = font.render_raw_to(surf.get_view('2'), text, size=24)
self.assertEqual(rrect, srect)
# Underline render to ints sized greater than 1 byte
# (__fill_glyph_INT)
srect = font.get_rect(text, size=24, style=ft.STYLE_UNDERLINE)
for bpp in [16, 24, 32]:
surf = pygame.Surface(srect.size, 0, bpp)
rrect = font.render_raw_to(surf.get_view('2'), text, size=24,
style=ft.STYLE_UNDERLINE)
self.assertEqual(rrect, srect)
# Unaliased (mono) rendering to ints greater than 1 byte
# (__render_glyph_MONO_as_INT)
font.antialiased = False
try:
srect = font.get_rect(text, size=24)
for bpp in [16, 24, 32]:
surf = pygame.Surface(srect.size, 0, bpp)
rrect = font.render_raw_to(surf.get_view('2'),
text, size=24)
self.assertEqual(rrect, srect)
finally:
font.antialiased = True
def test_freetype_Font_text_is_None(self):
f = ft.Font(self._sans_path, 36)
f.style = ft.STYLE_NORMAL
f.rotation = 0
text = 'ABCD'
# reference values
get_rect = f.get_rect(text)
f.vertical = True
get_rect_vert = f.get_rect(text)
self.assertTrue(get_rect_vert.width < get_rect.width)
self.assertTrue(get_rect_vert.height > get_rect.height)
f.vertical = False
render_to_surf = pygame.Surface(get_rect.size, pygame.SRCALPHA, 32)
arr = arrinter.Array(get_rect.size, 'u', 1)
render = f.render(text, (0, 0, 0))
render_to = f.render_to(render_to_surf, (0, 0), text, (0, 0, 0))
render_raw = f.render_raw(text)
render_raw_to = f.render_raw_to(arr, text)
# comparisons
surf = pygame.Surface(get_rect.size, pygame.SRCALPHA, 32)
self.assertEqual(f.get_rect(None), get_rect)
s, r = f.render(None, (0, 0, 0))
self.assertEqual(r, render[1])
self.assertTrue(surf_same_image(s, render[0]))
r = f.render_to(surf, (0, 0), None, (0, 0, 0))
self.assertEqual(r, render_to)
self.assertTrue(surf_same_image(surf, render_to_surf))
px, sz = f.render_raw(None)
self.assertEqual(sz, render_raw[1])
self.assertEqual(px, render_raw[0])
sz = f.render_raw_to(arr, None)
self.assertEqual(sz, render_raw_to)
# vertical: trigger glyph positioning.
f.vertical = True
r = f.get_rect(None)
self.assertEqual(r, get_rect_vert)
f.vertical = False
# wide style: trigger glyph reload
r = f.get_rect(None, style=ft.STYLE_WIDE)
self.assertEqual(r.height, get_rect.height)
self.assertTrue(r.width > get_rect.width)
r = f.get_rect(None)
self.assertEqual(r, get_rect)
# rotated: trigger glyph reload
r = f.get_rect(None, rotation=90)
self.assertEqual(r.width, get_rect.height)
self.assertEqual(r.height, get_rect.width)
# this method will not support None text
self.assertRaises(TypeError, f.get_metrics, None)
def test_freetype_Font_fgcolor(self):
f = ft.Font(self._bmp_8_75dpi_path)
notdef = '\0' # the PyGameMono .notdef glyph has a pixel at (0, 0)
f.origin = False
f.pad = False
black = pygame.Color('black') # initial color
green = pygame.Color('green')
alpha128 = pygame.Color(10, 20, 30, 128)
c = f.fgcolor
self.assertTrue(isinstance(c, pygame.Color))
self.assertEqual(c, black)
s, r = f.render(notdef)
self.assertEqual(s.get_at((0, 0)), black)
f.fgcolor = green
self.assertEqual(f.fgcolor, green)
s, r = f.render(notdef)
self.assertEqual(s.get_at((0, 0)), green)
f.fgcolor = alpha128
s, r = f.render(notdef)
self.assertEqual(s.get_at((0, 0)), alpha128)
surf = pygame.Surface(f.get_rect(notdef).size, pygame.SRCALPHA, 32)
f.render_to(surf, (0, 0), None)
self.assertEqual(surf.get_at((0, 0)), alpha128)
self.assertRaises(AttributeError, setattr, f, 'fgcolor', None)
if pygame.HAVE_NEWBUF:
def test_newbuf(self):
self.NEWBUF_test_newbuf()
if is_pygame_pkg:
from pygame.tests.test_utils import buftools
else:
from test.test_utils import buftools
def NEWBUF_test_newbuf(self):
Exporter = self.buftools.Exporter
font = self._TEST_FONTS['sans']
srect = font.get_rect("Hi", size=12)
for format in ['b', 'B', 'h', 'H', 'i', 'I', 'l', 'L', 'q', 'Q',
'x', '1x', '2x', '3x', '4x', '5x', '6x', '7x',
'8x', '9x', '<h', '>h', '=h', '@h', '!h', '1h', '=1h']:
newbuf = Exporter(srect.size, format=format)
rrect = font.render_raw_to(newbuf, "Hi", size=12)
self.assertEqual(rrect, srect)
# Some unsupported formats
for format in ['f', 'd', '2h', '?', 'hh']:
newbuf = Exporter(srect.size, format=format, itemsize=4)
self.assertRaises(ValueError, font.render_raw_to,
newbuf, "Hi", size=12)
def test_freetype_Font_style(self):
font = self._TEST_FONTS['sans']
# make sure STYLE_NORMAL is the default value
self.assertEqual(ft.STYLE_NORMAL, font.style)
# make sure we check for style type
try: font.style = "None"
except TypeError: pass
else: self.fail("Failed style assignement")
try: font.style = None
except TypeError: pass
else: self.fail("Failed style assignement")
# make sure we only accept valid constants
try: font.style = 112
except ValueError: pass
else: self.fail("Failed style assignement")
# make sure no assignments happened
self.assertEqual(ft.STYLE_NORMAL, font.style)
# test assignment
font.style = ft.STYLE_UNDERLINE
self.assertEqual(ft.STYLE_UNDERLINE, font.style)
# test complex styles
st = ( ft.STYLE_STRONG | ft.STYLE_UNDERLINE |
ft.STYLE_OBLIQUE )
font.style = st
self.assertEqual(st, font.style)
# revert changes
font.style = ft.STYLE_NORMAL
self.assertEqual(ft.STYLE_NORMAL, font.style)
def test_freetype_Font_resolution(self):
text = "|" # Differs in width and height
resolution = ft.get_default_resolution()
new_font = ft.Font(self._sans_path, resolution=2 * resolution)
self.assertEqual(new_font.resolution, 2 * resolution)
size_normal = self._TEST_FONTS['sans'].get_rect(text, size=24).size
size_scaled = new_font.get_rect(text, size=24).size
size_by_2 = size_normal[0] * 2
self.assertTrue(size_by_2 + 2 >= size_scaled[0] >= size_by_2 - 2,
"%i not equal %i" % (size_scaled[1], size_by_2))
size_by_2 = size_normal[1] * 2
self.assertTrue(size_by_2 + 2 >= size_scaled[1] >= size_by_2 - 2,
"%i not equal %i" % (size_scaled[1], size_by_2))
new_resolution = resolution + 10
ft.set_default_resolution(new_resolution)
try:
new_font = ft.Font(self._sans_path, resolution=0)
self.assertEqual(new_font.resolution, new_resolution)
finally:
ft.set_default_resolution()
def test_freetype_Font_path(self):
self.assertEqual(self._TEST_FONTS['sans'].path, self._sans_path)
self.assertRaises(AttributeError, getattr, nullfont(), 'path')
# This Font cache test is conditional on freetype being built by a debug
# version of Python or with the C macro PGFT_DEBUG_CACHE defined.
def test_freetype_Font_cache(self):
glyphs = "abcde"
glen = len(glyphs)
other_glyphs = "123"
oglen = len(other_glyphs)
uempty = unicode_("")
## many_glyphs = (uempty.join([unichr_(i) for i in range(32,127)] +
## [unichr_(i) for i in range(161,172)] +
## [unichr_(i) for i in range(174,239)]))
many_glyphs = uempty.join([unichr_(i) for i in range(32,127)])
mglen = len(many_glyphs)
count = 0
access = 0
hit = 0
miss = 0
f = ft.Font(None, size=24, font_index=0, resolution=72, ucs4=False)
f.style = ft.STYLE_NORMAL
f.antialiased = True
# Ensure debug counters are zero
self.assertEqual(f._debug_cache_stats, (0, 0, 0, 0, 0))
# Load some basic glyphs
count = access = miss = glen
f.render_raw(glyphs)
self.assertEqual(f._debug_cache_stats, (count, 0, access, hit, miss))
# Vertical should not affect the cache
access += glen
hit += glen
f.vertical = True
f.render_raw(glyphs)
f.vertical = False
self.assertEqual(f._debug_cache_stats, (count, 0, access, hit, miss))
# New glyphs will
count += oglen
access += oglen
miss += oglen
f.render_raw(other_glyphs)
self.assertEqual(f._debug_cache_stats, (count, 0, access, hit, miss))
# Point size does
count += glen
access += glen
miss += glen
f.render_raw(glyphs, size=12)
self.assertEqual(f._debug_cache_stats, (count, 0, access, hit, miss))
# Underline style does not
access += oglen
hit += oglen
f.underline = True
f.render_raw(other_glyphs)
f.underline = False
self.assertEqual(f._debug_cache_stats, (count, 0, access, hit, miss))
# Oblique style does
count += glen
access += glen
miss += glen
f.oblique = True
f.render_raw(glyphs)
f.oblique = False
self.assertEqual(f._debug_cache_stats, (count, 0, access, hit, miss))
# Strong style does; by this point cache clears can happen
count += glen
access += glen
miss += glen
f.strong = True
f.render_raw(glyphs)
f.strong = False
ccount, cdelete_count, caccess, chit, cmiss = f._debug_cache_stats
self.assertEqual((ccount + cdelete_count, caccess, chit, cmiss),
(count, access, hit, miss))
# Rotation does
count += glen
access += glen
miss += glen
f.render_raw(glyphs, rotation=10)
ccount, cdelete_count, caccess, chit, cmiss = f._debug_cache_stats
self.assertEqual((ccount + cdelete_count, caccess, chit, cmiss),
(count, access, hit, miss))
# aliased (mono) glyphs do
count += oglen
access += oglen
miss += oglen
f.antialiased = False
f.render_raw(other_glyphs)
f.antialiased = True
ccount, cdelete_count, caccess, chit, cmiss = f._debug_cache_stats
self.assertEqual((ccount + cdelete_count, caccess, chit, cmiss),
(count, access, hit, miss))
# Trigger a cleanup for sure.
count += 2 * mglen
access += 2 * mglen
miss += 2 * mglen
f.get_metrics(many_glyphs, size=8)
f.get_metrics(many_glyphs, size=10)
ccount, cdelete_count, caccess, chit, cmiss = f._debug_cache_stats
self.assertTrue(ccount < count)
self.assertEqual((ccount + cdelete_count, caccess, chit, cmiss),
(count, access, hit, miss))
try:
ft.Font._debug_cache_stats
except AttributeError:
del test_freetype_Font_cache
def test_undefined_character_code(self):
# To be consistent with pygame.font.Font, undefined codes
# are rendered as the undefined character, and has metrics
# of None.
font = self._TEST_FONTS['sans']
img, size1 = font.render(unichr_(1), (0, 0, 0), size=24)
img, size0 = font.render("", (0, 0, 0), size=24)
self.assertTrue(size1.width > size0.width )
metrics = font.get_metrics(unichr_(1) + unichr_(48), size=24)
self.assertEqual(len(metrics), 2)
self.assertTrue(metrics[0] is None)
self.assertTrue(isinstance(metrics[1], tuple))
def test_issue_144(self):
"""Issue #144: unable to render text"""
# The bug came in two parts. The first was a conversion bug from
# FT_Fixed to integer for an Intel x86_64 Pygame build. The second
# was to have the raised exception disappear before Font.render
# returned to Python level.
#
font = ft.Font(None, size=64)
s = 'M' * 100000 # Way too long for an SDL surface
self.assertRaises(pygame.error, font.render, s, (0, 0, 0))
def test_issue_242(self):
"""Issue #242: get_rect() uses 0 as default style"""
# Issue #242: freetype.Font.get_rect() ignores style defaults when
# the style argument is not given
#
# The text boundary rectangle returned by freetype.Font.get_rect()
# should match the boundary of the same text rendered directly to a
# surface. This permits accurate text positioning. To work properly,
# get_rect() should calculate the text boundary to reflect text style,
# such as underline. Instead, it ignores the style settings for the
# Font object when the style argument is omitted.
#
# When the style argument is not given, freetype.get_rect() uses
# unstyled text when calculating the boundary rectangle. This is
# because _ftfont_getrect(), in _freetype.c, set the default
# style to 0 rather than FT_STYLE_DEFAULT.
#
font = self._TEST_FONTS['sans']
# Try wide style on a wide character.
prev_style = font.wide
font.wide = True
try:
rect = font.get_rect('M', size=64)
surf, rrect = font.render(None, size=64)
self.assertEqual(rect, rrect)
finally:
font.wide = prev_style
# Try strong style on several wide characters.
prev_style = font.strong
font.strong = True
try:
rect = font.get_rect('Mm_', size=64)
surf, rrect = font.render(None, size=64)
self.assertEqual(rect, rrect)
finally:
font.strong = prev_style
# Try oblique style on a tall, narrow character.
prev_style = font.oblique
font.oblique = True
try:
rect = font.get_rect('|', size=64)
surf, rrect = font.render(None, size=64)
self.assertEqual(rect, rrect)
finally:
font.oblique = prev_style
# Try underline style on a glyphless character.
prev_style = font.underline
font.underline = True
try:
rect = font.get_rect(' ', size=64)
surf, rrect = font.render(None, size=64)
self.assertEqual(rect, rrect)
finally:
font.underline = prev_style
def test_issue_237(self):
"""Issue #237: Memory overrun when rendered with underlining"""
# Issue #237: Memory overrun when text without descenders is rendered
# with underlining
#
# The bug crashes the Python interpreter. The bug is caught with C
# assertions in ft_render_cb.c when the Pygame module is compiled
# for debugging. So far it is only known to affect Times New Roman.
#
name = "Times New Roman"
font = ft.SysFont(name, 19)
if font.name != name:
# The font is unavailable, so skip the test.
return
font.underline = True
s, r = font.render("Amazon", size=19)
# Some other checks to make sure nothing else broke.
for adj in [-2, -1.9, -1, 0, 1.9, 2]:
font.underline_adjustment = adj
s, r = font.render("Amazon", size=19)
def test_issue_243(self):
"""Issue Y: trailing space ignored in boundary calculation"""
# Issue #243: For a string with trailing spaces, freetype ignores the
# last space in boundary calculations
#
font = self._TEST_FONTS['fixed']
r1 = font.get_rect(" ", size=64)
self.assertTrue(r1.width > 1)
r2 = font.get_rect(" ", size=64)
self.assertEqual(r2.width, 2 * r1.width)
def test_garbage_collection(self):
"""Check reference counting on returned new references"""
def ref_items(seq):
return [weakref.ref(o) for o in seq]
font = self._TEST_FONTS['bmp-8-75dpi']
font.size = font.get_sizes()[0][0]
text = 'A'
rect = font.get_rect(text)
surf = pygame.Surface(rect.size, pygame.SRCALPHA, 32)
refs = []
refs.extend(ref_items(font.render(text, (0, 0, 0))))
refs.append(weakref.ref(font.render_to(surf, (0, 0), text, (0, 0, 0))))
refs.append(weakref.ref(font.get_rect(text)))
n = len(refs)
self.assertTrue(n > 0)
gc.collect()
for i in range(n):
self.assertTrue(refs[i]() is None, "ref %d not collected" % i)
try:
from sys import getrefcount
except ImportError:
pass
else:
array = arrinter.Array(rect.size, 'u', 1)
o = font.render_raw(text)
self.assertEqual(getrefcount(o), 2)
self.assertEqual(getrefcount(o[0]), 2)
self.assertEqual(getrefcount(o[1]), 2)
self.assertEqual(getrefcount(font.render_raw_to(array, text)), 1)
o = font.get_metrics('AB')
self.assertEqual(getrefcount(o), 2)
for i in range(len(o)):
self.assertEqual(getrefcount(o[i]), 2,
"refcount fail for item %d" % i)
o = font.get_sizes()
self.assertEqual(getrefcount(o), 2)
for i in range(len(o)):
self.assertEqual(getrefcount(o[i]), 2,
"refcount fail for item %d" % i)
class FreeTypeTest(unittest.TestCase):
def test_resolution(self):
was_init = ft.was_init()
if not was_init:
ft.init()
try:
ft.set_default_resolution()
resolution = ft.get_default_resolution()
self.assertEqual(resolution, 72)
new_resolution = resolution + 10
ft.set_default_resolution(new_resolution)
self.assertEqual(ft.get_default_resolution(), new_resolution)
ft.init(resolution=resolution+20)
self.assertEqual(ft.get_default_resolution(), new_resolution)
finally:
ft.set_default_resolution()
if was_init:
ft.quit()
def test_autoinit_and_autoquit(self):
pygame.init()
self.assertTrue(ft.was_init())
pygame.quit()
self.assertFalse(ft.was_init())
# Ensure autoquit is replaced at init time
pygame.init()
self.assertTrue(ft.was_init())
pygame.quit()
self.assertFalse(ft.was_init())
def test_cache_size(self):
DEFAULT_CACHE_SIZE = 64
ft.init()
self.assertEqual(ft.get_cache_size(), DEFAULT_CACHE_SIZE)
ft.quit()
self.assertEqual(ft.get_cache_size(), 0)
new_cache_size = DEFAULT_CACHE_SIZE * 2
ft.init(cache_size=new_cache_size)
self.assertEqual(ft.get_cache_size(), new_cache_size)
ft.quit()
if __name__ == '__main__':
unittest.main()
| mit | -7,316,808,656,908,135,000 | 39.375173 | 86 | 0.557496 | false |
jakereps/q2-feature-table | q2_feature_table/_group.py | 3 | 2250 | # ----------------------------------------------------------------------------
# Copyright (c) 2016-2021, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import biom
import qiime2
import numpy as np
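# _collapse_factory wraps a NumPy reduction so biom.Table.collapse() can apply
# it to one vector of counts at a time (table.iter_data walks the transposed
# collapse axis); _mode_lookup maps the user-facing `mode` values onto those
# reductions.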
def _collapse_factory(function):
def collapse_f(table, axis):
# axis is always the transpose of the original collapse axis
return np.array([function(x) for x in table.iter_data(axis=axis)])
return collapse_f
_mode_lookup = {
'sum': _collapse_factory(np.sum),
'median-ceiling': _collapse_factory(lambda x: np.ceil(np.median(x))),
'mean-ceiling': _collapse_factory(lambda x: np.ceil(np.mean(x)))
}
def _munge_metadata_column(mc, ids, axis):
mc = mc.filter_ids(ids)
# Check for empty values only after filtering down to relevant IDs.
missing = mc.get_ids(where_values_missing=True)
if missing:
raise ValueError("There are missing metadata column value(s) for "
"these %s ID(s): %s" %
(axis, ', '.join(repr(e) for e in sorted(missing))))
return mc
def group(table: biom.Table, axis: str,
metadata: qiime2.CategoricalMetadataColumn, mode: str) -> biom.Table:
if table.is_empty():
raise ValueError("Cannot group an empty table.")
if axis == 'feature':
biom_axis = 'observation'
else:
biom_axis = axis
metadata = _munge_metadata_column(metadata, table.ids(axis=biom_axis),
axis)
grouped_table = table.collapse(
lambda axis_id, _: metadata.get_value(axis_id),
collapse_f=_mode_lookup[mode],
axis=biom_axis,
norm=False,
include_collapsed_metadata=False)
# Reorder axis by first unique appearance of each group value in metadata
# (makes it stable for identity mappings and easier to test)
# TODO use CategoricalMetadataColumn API for retrieving categories/groups,
# when the API exists.
series = metadata.to_series()
return grouped_table.sort_order(series.unique(), axis=biom_axis)
| bsd-3-clause | -8,976,922,658,558,983,000 | 34.15625 | 79 | 0.602667 | false |
neumerance/cloudloon2 | .venv/lib/python2.7/site-packages/neutronclient/openstack/common/jsonutils.py | 7 | 6184 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
'''
JSON related utilities.
This module provides a few things:
1) A handy function for getting an object down to something that can be
JSON serialized. See to_primitive().
2) Wrappers around loads() and dumps(). The dumps() wrapper will
automatically use to_primitive() for you if needed.
3) This sets up anyjson to use the loads() and dumps() wrappers if anyjson
is available.
'''
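# Typical usage (illustrative example only):
#
#     payload = {'created_at': datetime.datetime(2013, 1, 1, 12, 0, 0)}
#     dumps(payload)     # plain json.dumps would fail on the datetime;
#                        # dumps() converts it first via to_primitive()
#     loads('{"a": 1}')  # -> {u'a': 1}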
import datetime
import functools
import inspect
import itertools
import json
import types
import xmlrpclib
import six
from neutronclient.openstack.common import importutils
from neutronclient.openstack.common import timeutils
netaddr = importutils.try_import("netaddr")
_nasty_type_tests = [inspect.ismodule, inspect.isclass, inspect.ismethod,
inspect.isfunction, inspect.isgeneratorfunction,
inspect.isgenerator, inspect.istraceback, inspect.isframe,
inspect.iscode, inspect.isbuiltin, inspect.isroutine,
inspect.isabstract]
_simple_types = (types.NoneType, int, basestring, bool, float, long)
def to_primitive(value, convert_instances=False, convert_datetime=True,
level=0, max_depth=3):
"""Convert a complex object into primitives.
Handy for JSON serialization. We can optionally handle instances,
but since this is a recursive function, we could have cyclical
data structures.
To handle cyclical data structures we could track the actual objects
visited in a set, but not all objects are hashable. Instead we just
track the depth of the object inspections and don't go too deep.
Therefore, convert_instances=True is lossy ... be aware.
"""
# handle obvious types first - order of basic types determined by running
# full tests on nova project, resulting in the following counts:
# 572754 <type 'NoneType'>
# 460353 <type 'int'>
# 379632 <type 'unicode'>
# 274610 <type 'str'>
# 199918 <type 'dict'>
# 114200 <type 'datetime.datetime'>
# 51817 <type 'bool'>
# 26164 <type 'list'>
# 6491 <type 'float'>
# 283 <type 'tuple'>
# 19 <type 'long'>
if isinstance(value, _simple_types):
return value
if isinstance(value, datetime.datetime):
if convert_datetime:
return timeutils.strtime(value)
else:
return value
# value of itertools.count doesn't get caught by nasty_type_tests
# and results in infinite loop when list(value) is called.
if type(value) == itertools.count:
return six.text_type(value)
# FIXME(vish): Workaround for LP bug 852095. Without this workaround,
# tests that raise an exception in a mocked method that
# has a @wrap_exception with a notifier will fail. If
# we up the dependency to 0.5.4 (when it is released) we
# can remove this workaround.
if getattr(value, '__module__', None) == 'mox':
return 'mock'
if level > max_depth:
return '?'
# The try block may not be necessary after the class check above,
# but just in case ...
try:
recursive = functools.partial(to_primitive,
convert_instances=convert_instances,
convert_datetime=convert_datetime,
level=level,
max_depth=max_depth)
if isinstance(value, dict):
return dict((k, recursive(v)) for k, v in value.iteritems())
elif isinstance(value, (list, tuple)):
return [recursive(lv) for lv in value]
# It's not clear why xmlrpclib created their own DateTime type, but
# for our purposes, make it a datetime type which is explicitly
# handled
if isinstance(value, xmlrpclib.DateTime):
value = datetime.datetime(*tuple(value.timetuple())[:6])
if convert_datetime and isinstance(value, datetime.datetime):
return timeutils.strtime(value)
elif hasattr(value, 'iteritems'):
return recursive(dict(value.iteritems()), level=level + 1)
elif hasattr(value, '__iter__'):
return recursive(list(value))
elif convert_instances and hasattr(value, '__dict__'):
# Likely an instance of something. Watch for cycles.
# Ignore class member vars.
return recursive(value.__dict__, level=level + 1)
elif netaddr and isinstance(value, netaddr.IPAddress):
return six.text_type(value)
else:
if any(test(value) for test in _nasty_type_tests):
return six.text_type(value)
return value
except TypeError:
        # Class objects are tricky since they may define something like
        # __iter__ that isn't actually callable via list().
return six.text_type(value)
def dumps(value, default=to_primitive, **kwargs):
return json.dumps(value, default=default, **kwargs)
def loads(s):
return json.loads(s)
def load(s):
return json.load(s)
try:
import anyjson
except ImportError:
pass
else:
anyjson._modules.append((__name__, 'dumps', TypeError,
'loads', ValueError, 'load'))
anyjson.force_implementation(__name__)
| apache-2.0 | 6,229,877,536,238,740,000 | 34.745665 | 79 | 0.642464 | false |
goace/personal-file-sharing-center | web/net.py | 6 | 6202 | """
Network Utilities
(from web.py)
"""
__all__ = [
"validipaddr", "validip6addr", "validipport", "validip", "validaddr",
"urlquote",
"httpdate", "parsehttpdate",
"htmlquote", "htmlunquote", "websafe",
]
import urllib, time
try: import datetime
except ImportError: pass
import re
import socket
def validip6addr(address):
"""
Returns True if `address` is a valid IPv6 address.
>>> validip6addr('::')
True
>>> validip6addr('aaaa:bbbb:cccc:dddd::1')
True
>>> validip6addr('1:2:3:4:5:6:7:8:9:10')
False
>>> validip6addr('12:10')
False
"""
"""
try:
socket.inet_pton(socket.AF_INET6, address)
except socket.error:
return False
"""
return False
def validipaddr(address):
"""
Returns True if `address` is a valid IPv4 address.
>>> validipaddr('192.168.1.1')
True
>>> validipaddr('192.168.1.800')
False
>>> validipaddr('192.168.1')
False
"""
try:
octets = address.split('.')
if len(octets) != 4:
return False
for x in octets:
if not (0 <= int(x) <= 255):
return False
except ValueError:
return False
return True
def validipport(port):
"""
Returns True if `port` is a valid IPv4 port.
>>> validipport('9000')
True
>>> validipport('foo')
False
>>> validipport('1000000')
False
"""
try:
if not (0 <= int(port) <= 65535):
return False
except ValueError:
return False
return True
def validip(ip, defaultaddr="0.0.0.0", defaultport=8080):
"""
Returns `(ip_address, port)` from string `ip_addr_port`
>>> validip('1.2.3.4')
('1.2.3.4', 8080)
>>> validip('80')
('0.0.0.0', 80)
>>> validip('192.168.0.1:85')
('192.168.0.1', 85)
>>> validip('::')
('::', 8080)
>>> validip('[::]:88')
('::', 88)
>>> validip('[::1]:80')
('::1', 80)
"""
addr = defaultaddr
port = defaultport
#Matt Boswell's code to check for ipv6 first
match = re.search(r'^\[([^]]+)\](?::(\d+))?$',ip) #check for [ipv6]:port
if match:
if validip6addr(match.group(1)):
if match.group(2):
if validipport(match.group(2)): return (match.group(1),int(match.group(2)))
else:
return (match.group(1),port)
else:
if validip6addr(ip): return (ip,port)
#end ipv6 code
ip = ip.split(":", 1)
if len(ip) == 1:
if not ip[0]:
pass
elif validipaddr(ip[0]):
addr = ip[0]
elif validipport(ip[0]):
port = int(ip[0])
else:
raise ValueError, ':'.join(ip) + ' is not a valid IP address/port'
elif len(ip) == 2:
addr, port = ip
        if not (validipaddr(addr) and validipport(port)):
raise ValueError, ':'.join(ip) + ' is not a valid IP address/port'
port = int(port)
else:
raise ValueError, ':'.join(ip) + ' is not a valid IP address/port'
return (addr, port)
def validaddr(string_):
"""
Returns either (ip_address, port) or "/path/to/socket" from string_
>>> validaddr('/path/to/socket')
'/path/to/socket'
>>> validaddr('8000')
('0.0.0.0', 8000)
>>> validaddr('127.0.0.1')
('127.0.0.1', 8080)
>>> validaddr('127.0.0.1:8000')
('127.0.0.1', 8000)
>>> validip('[::1]:80')
('::1', 80)
>>> validaddr('fff')
Traceback (most recent call last):
...
ValueError: fff is not a valid IP address/port
"""
if '/' in string_:
return string_
else:
return validip(string_)
def urlquote(val):
"""
Quotes a string for use in a URL.
>>> urlquote('://?f=1&j=1')
'%3A//%3Ff%3D1%26j%3D1'
>>> urlquote(None)
''
>>> urlquote(u'\u203d')
'%E2%80%BD'
"""
if val is None: return ''
if not isinstance(val, unicode): val = str(val)
else: val = val.encode('utf-8')
return urllib.quote(val)
def httpdate(date_obj):
"""
Formats a datetime object for use in HTTP headers.
>>> import datetime
>>> httpdate(datetime.datetime(1970, 1, 1, 1, 1, 1))
'Thu, 01 Jan 1970 01:01:01 GMT'
"""
return date_obj.strftime("%a, %d %b %Y %H:%M:%S GMT")
def parsehttpdate(string_):
"""
Parses an HTTP date into a datetime object.
>>> parsehttpdate('Thu, 01 Jan 1970 01:01:01 GMT')
datetime.datetime(1970, 1, 1, 1, 1, 1)
"""
try:
t = time.strptime(string_, "%a, %d %b %Y %H:%M:%S %Z")
except ValueError:
return None
return datetime.datetime(*t[:6])
def htmlquote(text):
r"""
Encodes `text` for raw use in HTML.
>>> htmlquote(u"<'&\">")
u'<'&">'
"""
text = text.replace(u"&", u"&") # Must be done first!
text = text.replace(u"<", u"<")
text = text.replace(u">", u">")
text = text.replace(u"'", u"'")
text = text.replace(u'"', u""")
return text
def htmlunquote(text):
r"""
Decodes `text` that's HTML quoted.
>>> htmlunquote(u'<'&">')
u'<\'&">'
"""
text = text.replace(u""", u'"')
text = text.replace(u"'", u"'")
text = text.replace(u">", u">")
text = text.replace(u"<", u"<")
text = text.replace(u"&", u"&") # Must be done last!
return text
def websafe(val):
r"""Converts `val` so that it is safe for use in Unicode HTML.
>>> websafe("<'&\">")
u'<'&">'
>>> websafe(None)
u''
>>> websafe(u'\u203d')
u'\u203d'
>>> websafe('\xe2\x80\xbd')
u'\u203d'
"""
if val is None:
return u''
elif isinstance(val, str):
val = val.decode('utf-8')
elif not isinstance(val, unicode):
val = unicode(val)
return htmlquote(val)
if __name__ == "__main__":
import doctest
doctest.testmod()
| gpl-2.0 | -3,697,521,482,745,772,000 | 24.211382 | 91 | 0.506611 | false |
dansok/algos | crypto.py | 1 | 1334 | def isCryptSolution(crypt, solution):
mapping = {}
for s in solution:
mapping.setdefault(s[0], s[1])
    def decrypt(word):
        if len(word) == 1:
            return int(mapping[word]), True
        dec = ''
        is_valid = True
        for i, c in enumerate(word):
d = mapping[c]
dec += d
if i == 0 and d == '0':
is_valid = False
break
return int(dec), is_valid
n0, is_valid = decrypt(crypt[0])
if not is_valid:
return False
n1, is_valid = decrypt(crypt[1])
if not is_valid:
return False
n2, is_valid = decrypt(crypt[2])
if not is_valid:
return False
return n0 + n1 == n2
def main():
crypt = ["SEND", "MORE", "MONEY"]
solution = [['O', '0'],
['M', '1'],
['Y', '2'],
['E', '5'],
['N', '6'],
['D', '7'],
['R', '8'],
['S', '9']]
print isCryptSolution(crypt, solution)
if __name__ == "__main__":
main()
| gpl-3.0 | -2,452,991,700,870,470,700 | 25.68 | 56 | 0.344828 | false |
Ritsyy/fjord | fjord/base/tests/test_domain.py | 5 | 1541 | from fjord.base.domain import get_domain
from fjord.base.tests import TestCase
class TestGetDomain(TestCase):
def test_get_domain(self):
testdata = [
# URLs with good domains
(u'foo.example.com', u'example.com'),
(u'http://example.com/', u'example.com'),
(u'http://foo.example.com', u'example.com'),
(u'http://foo.example.com/', u'example.com'),
(u'http://foo.example.com:8000/bar/?foo=bar#foobar',
u'example.com'),
(u'https://foo.bar.baz.example.com/', u'example.com'),
(u'example.com.br', u'example.com.br'),
(u'foo.example.com.br', u'example.com.br'),
(u'https://foo.example.com.br/', u'example.com.br'),
(u'http://blog.goo.ne.jp/shinsburger', u'goo.ne.jp'),
# FIXME - This fails in the tests. See if this works when we
# change the code to get the most recent tld list from Mozilla.
# (u'http://500px.com\u6253\u4e0d\u5f00/',
# u'500px.com\u6253\u4e0d\u5f00'),
# URLs with domains we don't like
(None, u''),
(u'', u''),
(u'about:home', u''),
(u'chrome://whatever', u''),
(u'127.0.0.1', u''),
(u'ftp://[email protected]', u''),
(u'155.39.97.145.in-addr.arpa', u''),
(u'example.mil', u''),
(u'localhost', u''),
]
for data, expected in testdata:
assert get_domain(data) == expected
| bsd-3-clause | -6,361,642,283,630,301,000 | 38.512821 | 75 | 0.508761 | false |
kevinlondon/youtube-dl | youtube_dl/extractor/vidme.py | 15 | 5043 | from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_HTTPError
from ..utils import (
ExtractorError,
int_or_none,
float_or_none,
parse_iso8601,
)
class VidmeIE(InfoExtractor):
_VALID_URL = r'https?://vid\.me/(?:e/)?(?P<id>[\da-zA-Z]+)'
_TESTS = [{
'url': 'https://vid.me/QNB',
'md5': 'c62f1156138dc3323902188c5b5a8bd6',
'info_dict': {
'id': 'QNB',
'ext': 'mp4',
'title': 'Fishing for piranha - the easy way',
'description': 'source: https://www.facebook.com/photo.php?v=312276045600871',
'thumbnail': 're:^https?://.*\.jpg',
'timestamp': 1406313244,
'upload_date': '20140725',
'age_limit': 0,
'duration': 119.92,
'view_count': int,
'like_count': int,
'comment_count': int,
},
}, {
'url': 'https://vid.me/Gc6M',
'md5': 'f42d05e7149aeaec5c037b17e5d3dc82',
'info_dict': {
'id': 'Gc6M',
'ext': 'mp4',
'title': 'O Mere Dil ke chain - Arnav and Khushi VM',
'thumbnail': 're:^https?://.*\.jpg',
'timestamp': 1441211642,
'upload_date': '20150902',
'uploader': 'SunshineM',
'uploader_id': '3552827',
'age_limit': 0,
'duration': 223.72,
'view_count': int,
'like_count': int,
'comment_count': int,
},
'params': {
'skip_download': True,
},
}, {
# tests uploader field
'url': 'https://vid.me/4Iib',
'info_dict': {
'id': '4Iib',
'ext': 'mp4',
'title': 'The Carver',
'description': 'md5:e9c24870018ae8113be936645b93ba3c',
'thumbnail': 're:^https?://.*\.jpg',
'timestamp': 1433203629,
'upload_date': '20150602',
'uploader': 'Thomas',
'uploader_id': '109747',
'age_limit': 0,
'duration': 97.859999999999999,
'view_count': int,
'like_count': int,
'comment_count': int,
},
'params': {
'skip_download': True,
},
}, {
# nsfw test from http://naked-yogi.tumblr.com/post/118312946248/naked-smoking-stretching
'url': 'https://vid.me/e/Wmur',
'info_dict': {
'id': 'Wmur',
'ext': 'mp4',
'title': 'naked smoking & stretching',
'thumbnail': 're:^https?://.*\.jpg',
'timestamp': 1430931613,
'upload_date': '20150506',
'uploader': 'naked-yogi',
'uploader_id': '1638622',
'age_limit': 18,
'duration': 653.26999999999998,
'view_count': int,
'like_count': int,
'comment_count': int,
},
'params': {
'skip_download': True,
},
}]
def _real_extract(self, url):
video_id = self._match_id(url)
try:
response = self._download_json(
'https://api.vid.me/videoByUrl/%s' % video_id, video_id)
except ExtractorError as e:
if isinstance(e.cause, compat_HTTPError) and e.cause.code == 400:
response = self._parse_json(e.cause.read(), video_id)
else:
raise
error = response.get('error')
if error:
raise ExtractorError(
'%s returned error: %s' % (self.IE_NAME, error), expected=True)
video = response['video']
formats = [{
'format_id': f.get('type'),
'url': f['uri'],
'width': int_or_none(f.get('width')),
'height': int_or_none(f.get('height')),
} for f in video.get('formats', []) if f.get('uri')]
self._sort_formats(formats)
title = video['title']
description = video.get('description')
thumbnail = video.get('thumbnail_url')
timestamp = parse_iso8601(video.get('date_created'), ' ')
uploader = video.get('user', {}).get('username')
uploader_id = video.get('user', {}).get('user_id')
age_limit = 18 if video.get('nsfw') is True else 0
duration = float_or_none(video.get('duration'))
view_count = int_or_none(video.get('view_count'))
like_count = int_or_none(video.get('likes_count'))
comment_count = int_or_none(video.get('comment_count'))
return {
'id': video_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'uploader': uploader,
'uploader_id': uploader_id,
'age_limit': age_limit,
'timestamp': timestamp,
'duration': duration,
'view_count': view_count,
'like_count': like_count,
'comment_count': comment_count,
'formats': formats,
}
| unlicense | 1,919,706,753,337,305,300 | 32.397351 | 96 | 0.482848 | false |
ostash/qt-creator-i18n-uk | src/libs/3rdparty/botan/doc/scripts/comba.py | 14 | 1673 | #!/usr/bin/python
import sys
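# Comba multiplication computes an N-word by N-word product column by column:
# output word i is the sum of the partial products x[j] * y[i - j], accumulated
# in a three-word carry chain (w0, w1, w2). comba_indexes() enumerates the
# (j, i - j) pairs contributing to each column, and the *_code() helpers print
# the corresponding unrolled word3_muladd() calls as C source.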
def comba_indexes(N):
indexes = []
for i in xrange(0, 2*N):
x = []
for j in xrange(max(0, i-N+1), min(N, i+1)):
x += [(j,i-j)]
indexes += [sorted(x)]
return indexes
def comba_sqr_indexes(N):
indexes = []
for i in xrange(0, 2*N):
x = []
for j in xrange(max(0, i-N+1), min(N, i+1)):
if j < i-j:
x += [(j,i-j)]
else:
x += [(i-j,j)]
indexes += [sorted(x)]
return indexes
def comba_multiply_code(N):
indexes = comba_indexes(N)
for (i,idx) in zip(range(0, len(indexes)), indexes):
for pair in idx:
print "word3_muladd(&w2, &w1, &w0, x[%2d], y[%2d]);" % (pair)
print "z[%2d] = w0; w0 = w1; w1 = w2; w2 = 0;" % (i)
def comba_square_code(N):
indexes = comba_sqr_indexes(N)
for (rnd,idx) in zip(range(0, len(indexes)), indexes):
for (i,pair) in zip(range(0, len(idx)), idx):
if pair[0] == pair[1]:
print " word3_muladd(&w2, &w1, &w0, x[%2d], x[%2d]);" % (pair)
elif i % 2 == 0:
print " word3_muladd_2(&w2, &w1, &w0, x[%2d], x[%2d]);" % (pair[0], pair[1])
if rnd < len(idx)-2:
print " z[%2d] = w0; w0 = w1; w1 = w2; w2 = 0;\n" % (rnd)
elif rnd == len(idx)-1:
print " z[%2d] = w0;\n" % (rnd)
else:
print " z[%2d] = w1;\n" % (rnd)
def main(args = None):
if args is None:
args = sys.argv
#comba_square_code(int(args[1]))
comba_multiply_code(int(args[1]))
if __name__ == '__main__':
sys.exit(main())
| lgpl-2.1 | -4,192,430,556,457,710,000 | 24.738462 | 94 | 0.451883 | false |
zerodayz/citellus | tests/plugins-unit-tests/test_executable_bit.py | 1 | 1158 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2018, 2019, 2020 Pablo Iranzo Gómez <[email protected]>
import os
import sys
from unittest import TestCase
sys.path.append(os.path.abspath(os.path.dirname(__file__) + "/" + "../"))
import citellusclient.shell as citellus
testplugins = os.path.join(citellus.citellusdir, "plugins", "test")
citellusdir = citellus.citellusdir
class CitellusTest(TestCase):
def test_plugins_have_executable_bit(self):
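        # citellus.findplugins() is expected to return only plugin files that
        # are executable, so comparing its output against every plugin file
        # found on disk asserts that each shipped plugin has the executable
        # bit set.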
pluginpath = [os.path.join(citellus.citellusdir, "plugins", "core")]
plugins = []
for folder in pluginpath:
for root, dirnames, filenames in os.walk(folder, followlinks=True):
for filename in filenames:
filepath = os.path.join(root, filename)
if ".citellus_tests" not in filepath:
plugins.append(filepath)
plugins = sorted(set(plugins))
pluginscit = []
for plugin in citellus.findplugins(folders=pluginpath):
pluginscit.append(plugin["plugin"])
pluginscit = sorted(set(pluginscit))
assert plugins == pluginscit
| gpl-3.0 | 901,884,395,680,482,400 | 32.057143 | 79 | 0.636128 | false |
davidzchen/tensorflow | tensorflow/python/ops/control_flow_grad.py | 8 | 9421 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gradients for operators defined in control_flow_ops.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import math_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import,undefined-variable
from tensorflow.python.ops.control_flow_ops import *
# pylint: enable=wildcard-import
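# The functions below are registered (via ops.RegisterGradient) as the gradient
# implementations for the control-flow primitives Switch, Merge, Exit, Enter,
# NextIteration and LoopCond; tf.gradients looks them up when backpropagating
# through graphs built by tf.cond() and tf.while_loop().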
def _SwitchGrad(op, *grad):
"""Gradients for a Switch op is calculated using a Merge op.
If the switch is a loop switch, it will be visited twice. We create
the merge on the first visit, and update the other input of the merge
on the second visit. A next_iteration is also added on second visit.
"""
graph = ops.get_default_graph()
# pylint: disable=protected-access
op_ctxt = op._get_control_flow_context()
grad_ctxt = graph._get_control_flow_context()
# pylint: enable=protected-access
if isinstance(op_ctxt, WhileContext):
merge_grad = grad_ctxt.grad_state.switch_map.get(op)
if merge_grad is not None:
# This is the second time this Switch is visited. It comes from
# the non-exit branch of the Switch, so update the second input
# to the Merge.
# TODO(yuanbyu): Perform shape inference with this new input.
if grad[1] is not None:
# pylint: disable=protected-access
control_flow_ops._AddNextAndBackEdge(merge_grad, grad[1],
enforce_shape_invariant=False)
# pylint: enable=protected-access
return None, None
elif grad[0] is not None:
# This is the first time this Switch is visited. It comes from
# the Exit branch, which is grad[0]. grad[1] is empty at this point.
# Use grad[0] for both inputs to merge for now, but update the second
# input of merge when we see this Switch the second time.
merge_grad = merge([grad[0], grad[0]], name="b_switch")[0]
grad_ctxt.grad_state.switch_map[op] = merge_grad
return merge_grad, None
else:
# This is the first time this Switch is visited. It comes from the
# Identity branch. Such a Switch has `None` gradient for the Exit branch,
# meaning the output is not differentiable.
return None, None
elif isinstance(op_ctxt, CondContext):
zero_grad = grad[1 - op_ctxt.branch]
# At this point, we have created zero_grad guarded by the right switch.
# Unfortunately, we may still get None here for not trainable data types.
if zero_grad is None:
# For resource variables we get None always on the other branch, so bypass
# this.
if op.inputs[0].dtype == dtypes.resource:
return merge(
[grad[op_ctxt.branch]] * 2, name="cond_resource_grad")[0], None
return None, None
return merge(grad, name="cond_grad")[0], None
else:
false_grad = switch(grad[0], op.inputs[1])[0]
true_grad = switch(grad[1], op.inputs[1])[1]
return merge([false_grad, true_grad])[0], None
ops.RegisterGradient("Switch")(_SwitchGrad)
ops.RegisterGradient("RefSwitch")(_SwitchGrad)
@ops.RegisterGradient("Merge")
def _MergeGrad(op, grad, _):
"""Gradients for a Merge op are calculated using a Switch op."""
input_op = op.inputs[0].op
graph = ops.get_default_graph()
# pylint: disable=protected-access
op_ctxt = control_flow_util.GetOutputContext(input_op)
grad_ctxt = graph._get_control_flow_context()
# pylint: enable=protected-access
if isinstance(op_ctxt, WhileContext):
# pylint: disable=protected-access
return control_flow_ops._SwitchRefOrTensor(grad, grad_ctxt.pivot)
# pylint: enable=protected-access
elif isinstance(op_ctxt, CondContext):
pred = op_ctxt.pred
if grad_ctxt and grad_ctxt.grad_state:
# This Merge node is part of a cond within a loop.
# The backprop needs to have the value of this predicate for every
# iteration. So we must have its values accumulated in the forward, and
# use the accumulated values as the predicate for this backprop switch.
grad_state = grad_ctxt.grad_state
real_pred = grad_state.history_map.get(pred.name)
if real_pred is None:
# Remember the value of pred for every iteration.
grad_ctxt = grad_state.grad_context
grad_ctxt.Exit()
history_pred = grad_state.AddForwardAccumulator(pred)
grad_ctxt.Enter()
# Add the stack pop op. If pred.op is in a (outer) CondContext,
# the stack pop will be guarded with a switch.
real_pred = grad_state.AddBackpropAccumulatedValue(history_pred, pred)
grad_state.history_map[pred.name] = real_pred
pred = real_pred
# pylint: disable=protected-access
return control_flow_ops._SwitchRefOrTensor(grad, pred, name="cond_grad")
# pylint: enable=protected-access
else:
num_inputs = len(op.inputs)
cond = [math_ops.equal(op.outputs[1], i) for i in xrange(num_inputs)]
# pylint: disable=protected-access
return [control_flow_ops._SwitchRefOrTensor(grad, cond[i])[1]
for i in xrange(num_inputs)]
# pylint: enable=protected-access
@ops.RegisterGradient("RefMerge")
def _RefMergeGrad(op, grad, _):
return _MergeGrad(op, grad, _)
@ops.RegisterGradient("Exit")
def _ExitGrad(op, grad):
"""Gradients for an exit op are calculated using an Enter op."""
graph = ops.get_default_graph()
# pylint: disable=protected-access
op_ctxt = op._get_control_flow_context()
grad_ctxt = graph._get_control_flow_context()
# pylint: enable=protected-access
if not grad_ctxt.back_prop:
# The flag `back_prop` is set by users to suppress gradient
# computation for this loop. If the attribute `back_prop` is false,
# no gradient computation.
return None
if op_ctxt.grad_state:
raise TypeError("Second-order gradient for while loops not supported.")
if isinstance(grad, ops.Tensor):
grad_ctxt.AddName(grad.name)
else:
if not isinstance(grad, (ops.IndexedSlices, sparse_tensor.SparseTensor)):
raise TypeError("Type %s not supported" % type(grad))
grad_ctxt.AddName(grad.values.name)
grad_ctxt.AddName(grad.indices.name)
dense_shape = grad.dense_shape
if dense_shape is not None:
grad_ctxt.AddName(dense_shape.name)
grad_ctxt.Enter()
# pylint: disable=protected-access
result = control_flow_ops._Enter(
grad, grad_ctxt.name, is_constant=False,
parallel_iterations=grad_ctxt.parallel_iterations,
name="b_exit")
# pylint: enable=protected-access
grad_ctxt.loop_enters.append(result)
grad_ctxt.Exit()
return result
ops.RegisterGradient("RefExit")(_ExitGrad)
@ops.RegisterGradient("NextIteration")
def _NextIterationGrad(_, grad):
"""A forward next_iteration is translated into a backprop identity.
Note that the backprop next_iteration is added in switch grad.
"""
return grad
@ops.RegisterGradient("RefNextIteration")
def _RefNextIterationGrad(_, grad):
return _NextIterationGrad(_, grad)
@ops.RegisterGradient("Enter")
def _EnterGrad(op, grad):
"""Gradients for an Enter are calculated using an Exit op.
For loop variables, grad is the gradient so just add an exit.
For loop invariants, we need to add an accumulator loop.
"""
graph = ops.get_default_graph()
# pylint: disable=protected-access
grad_ctxt = graph._get_control_flow_context()
# pylint: enable=protected-access
if grad_ctxt is None:
return grad
if not grad_ctxt.back_prop:
# Skip gradient computation, if the attribute `back_prop` is false.
return grad
if grad_ctxt.grad_state is None:
# Pass the gradient through if we are not in a gradient while context.
return grad
if op.get_attr("is_constant"):
# Add a gradient accumulator for each loop invariant.
if isinstance(grad, ops.Tensor):
result = grad_ctxt.AddBackpropAccumulator(op, grad)
elif isinstance(grad, ops.IndexedSlices):
result = grad_ctxt.AddBackpropIndexedSlicesAccumulator(op, grad)
else:
# TODO(yuanbyu, lukasr): Add support for SparseTensor.
raise TypeError("Type %s not supported" % type(grad))
else:
result = exit(grad)
grad_ctxt.loop_exits.append(result)
grad_ctxt.ExitResult([result])
return result
@ops.RegisterGradient("RefEnter")
def _RefEnterGrad(op, grad):
return _EnterGrad(op, grad)
@ops.RegisterGradient("LoopCond")
def _LoopCondGrad(_):
"""Stop backprop for the predicate of a while loop."""
return None
| apache-2.0 | 8,617,369,318,038,735,000 | 37.453061 | 80 | 0.696741 | false |
mdinacci/rtw | editor/src/gui/qt/plugins/assetsbrowserplugin.py | 1 | 1076 | # -*- coding: utf-8-*-
"""
Author: Marco Dinacci <[email protected]>
Copyright © 2008-2009
"""
from PyQt4 import QtGui, QtDesigner
from assetsbrowser import AssetsBrowser
class AssetBrowserPlugin(QtDesigner.QPyDesignerCustomWidgetPlugin):
def __init__(self, parent=None):
QtDesigner.QPyDesignerCustomWidgetPlugin.__init__(self)
self.initialized = False
def initialize(self, core):
if self.initialized:
return
self.initialized = True
def isInitialized(self):
return self.initialized
def createWidget(self, parent):
return AssetsBrowser(parent)
def name(self):
return "AssetsBrowser"
def group(self):
return "Game editor"
def toolTip(self):
return ""
def whatsThis(self):
return ""
def isContainer(self):
return False
def domXml(self):
return '<widget class="AssetsBrowser" name=\"assetsBrowser\" />\n'
def includeFile(self):
return "assetsbrowser"
| mit | 1,578,893,334,417,715,700 | 21.87234 | 74 | 0.616744 | false |
aselle/tensorflow | tensorflow/contrib/autograph/operators/data_structures.py | 7 | 11328 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operators specific to data structures: list append, subscripts, etc."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import list_ops
from tensorflow.python.ops import tensor_array_ops
# TODO(mdan): Once control flow supports objects, repackage as a class.
def new_list(iterable=None):
"""The list constructor.
Args:
iterable: Optional elements to fill the list with.
Returns:
A list-like object. The exact return value depends on the initial elements.
"""
if iterable:
elements = tuple(iterable)
else:
elements = ()
if elements:
# When the list contains elements, it is assumed to be a "Python" lvalue
# list.
return _py_list_new(elements)
return tf_tensor_list_new(elements)
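# Note: AutoGraph-converted code calls these operators in place of Python list
# syntax; for example, `l = []` becomes (roughly) `l = new_list()` and
# `l.append(x)` becomes `l = list_append(l, x)`, which lets the staged
# TensorFlow list types above be substituted transparently.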
def tf_tensor_array_new(elements, element_dtype=None, element_shape=None):
"""Overload of new_list that stages a Tensor list creation."""
elements = tuple(ops.convert_to_tensor(el) for el in elements)
all_dtypes = set(el.dtype for el in elements)
if len(all_dtypes) == 1:
inferred_dtype, = tuple(all_dtypes)
if element_dtype is not None and element_dtype != inferred_dtype:
raise ValueError(
'incompatible dtype; specified: {}, inferred from {}: {}'.format(
element_dtype, elements, inferred_dtype))
elif len(all_dtypes) > 1:
raise ValueError(
'TensorArray requires all elements to have the same dtype:'
' {}'.format(elements))
else:
if element_dtype is None:
raise ValueError('dtype is required to create an empty TensorArray')
all_shapes = set(tuple(el.shape.as_list()) for el in elements)
if len(all_shapes) == 1:
inferred_shape, = tuple(all_shapes)
if element_shape is not None and element_shape != inferred_shape:
raise ValueError(
'incompatible shape; specified: {}, inferred from {}: {}'.format(
element_shape, elements, inferred_shape))
elif len(all_shapes) > 1:
raise ValueError(
'TensorArray requires all elements to have the same shape:'
' {}'.format(elements))
# TODO(mdan): We may want to allow different shapes with infer_shape=False.
else:
inferred_shape = None
if element_dtype is None:
element_dtype = inferred_dtype
if element_shape is None:
element_shape = inferred_shape
l = tensor_array_ops.TensorArray(
dtype=element_dtype,
size=len(elements),
dynamic_size=True,
infer_shape=(element_shape is None),
element_shape=element_shape)
for i, el in enumerate(elements):
l = l.write(i, el)
return l
def tf_tensor_list_new(elements, element_dtype=None, element_shape=None):
"""Overload of new_list that stages a Tensor list creation."""
elements = tuple(ops.convert_to_tensor(el) for el in elements)
all_dtypes = set(el.dtype for el in elements)
if len(all_dtypes) == 1:
inferred_dtype = tuple(all_dtypes)[0]
if element_dtype is not None and element_dtype != inferred_dtype:
raise ValueError(
'incompatible dtype; specified: {}, inferred from {}: {}'.format(
element_dtype, elements, inferred_dtype))
else:
# Heterogeneous lists are ok.
if element_dtype is not None:
raise ValueError(
'specified dtype {} is inconsistent with that of elements {}'.format(
element_dtype, elements))
inferred_dtype = dtypes.variant
all_shapes = set(tuple(el.shape.as_list()) for el in elements)
if len(all_shapes) == 1:
inferred_shape = array_ops.shape(elements[0])
if element_shape is not None and element_shape != inferred_shape:
raise ValueError(
'incompatible shape; specified: {}, inferred from {}: {}'.format(
element_shape, elements, inferred_shape))
else:
# Heterogeneous lists are ok.
if element_shape is not None:
raise ValueError(
'specified shape {} is inconsistent with that of elements {}'.format(
element_shape, elements))
inferred_shape = constant_op.constant(-1) # unknown shape, by convention
if element_dtype is None:
element_dtype = inferred_dtype
if element_shape is None:
element_shape = inferred_shape
l = list_ops.empty_tensor_list(
element_shape=element_shape, element_dtype=element_dtype)
for el in elements:
l = list_ops.tensor_list_push_back(l, el)
return l
def _py_list_new(elements):
"""Overload of new_list that creates a Python list."""
return list(elements)
def list_append(list_, x):
"""The list append function.
  Note: it is unspecified whether list_ will be mutated or not. If list_ is
  a TensorFlow entity, it will typically not be mutated. If list_ is a plain
list, it will be. In general, if the list is mutated then the return value
should point to the original entity.
Args:
list_: An entity that supports append semantics.
x: The element to append.
Returns:
Same as list_, after the append was performed.
Raises:
ValueError: if list_ is not of a known list-like type.
"""
if isinstance(list_, tensor_array_ops.TensorArray):
return _tf_tensorarray_append(list_, x)
elif tensor_util.is_tensor(list_):
if list_.dtype == dtypes.variant:
return _tf_tensor_list_append(list_, x)
else:
raise ValueError(
'tensor lists are expected to be Tensors with dtype=tf.variant,'
' instead found %s' % list_)
else:
return _py_list_append(list_, x)
def _tf_tensor_list_append(list_, x):
"""Overload of list_append that stages a Tensor list write."""
def empty_list_of_elements_like_x():
tensor_x = ops.convert_to_tensor(x)
return list_ops.empty_tensor_list(
element_shape=array_ops.shape(tensor_x),
element_dtype=tensor_x.dtype)
list_ = control_flow_ops.cond(
list_ops.tensor_list_length(list_) > 0,
lambda: list_,
empty_list_of_elements_like_x,
)
return list_ops.tensor_list_push_back(list_, x)
def _tf_tensorarray_append(list_, x):
"""Overload of list_append that stages a TensorArray write."""
return list_.write(list_.size(), x)
def _py_list_append(list_, x):
"""Overload of list_append that executes a Python list append."""
# Revert to the original call.
list_.append(x)
return list_
class ListPopOpts(
collections.namedtuple('ListPopOpts', ('element_dtype', 'element_shape'))):
pass
def list_pop(list_, i, opts):
"""The list pop function.
  Note: it is unspecified whether list_ will be mutated or not. If list_ is
  a TensorFlow entity, it will typically not be mutated. If list_ is a plain
list, it will be. In general, if the list is mutated then the return value
should point to the original entity.
Args:
list_: An entity that supports pop semantics.
i: Optional index to pop from. May be None.
opts: A ListPopOpts.
Returns:
Tuple (x, out_list_):
out_list_: same as list_, after the removal was performed.
x: the removed element value.
Raises:
ValueError: if list_ is not of a known list-like type or the operation is
not supported for that type.
"""
assert isinstance(opts, ListPopOpts)
if isinstance(list_, tensor_array_ops.TensorArray):
raise ValueError('TensorArray does not support item removal')
elif tensor_util.is_tensor(list_):
if list_.dtype == dtypes.variant:
return _tf_tensor_list_pop(list_, i, opts)
else:
raise ValueError(
'tensor lists are expected to be Tensors with dtype=tf.variant,'
' instead found %s' % list_)
else:
return _py_list_pop(list_, i)
def _tf_tensor_list_pop(list_, i, opts):
"""Overload of list_pop that stages a Tensor list pop."""
if i is not None:
raise NotImplementedError('tensor lists only support removing from the end')
if opts.element_dtype is None:
raise ValueError('cannot pop from a list without knowing its element '
'type; use set_element_type to annotate it')
if opts.element_shape is None:
raise ValueError('cannot pop from a list without knowing its element '
'shape; use set_element_type to annotate it')
list_out, x = list_ops.tensor_list_pop_back(
list_, element_dtype=opts.element_dtype)
x.set_shape(opts.element_shape)
return list_out, x
def _py_list_pop(list_, i):
"""Overload of list_pop that executes a Python list append."""
if i is None:
x = list_.pop()
else:
x = list_.pop(i)
return list_, x
# TODO(mdan): Look into reducing duplication between all these containers.
class ListStackOpts(
collections.namedtuple('ListStackOpts',
('element_dtype', 'original_call'))):
pass
def list_stack(list_, opts):
"""The list stack function.
  This does not have a direct counterpart in Python. The closest idiom to
  this is tf.stack or np.stack. It's different from those in the sense that it
accepts a Tensor list, rather than a list of tensors. It can also accept
TensorArray. When the target is anything else, the dispatcher will rely on
ctx.original_call for fallback.
Args:
list_: An entity that supports append semantics.
opts: A ListStackOpts object.
Returns:
The output of the stack operation, typically a Tensor.
"""
assert isinstance(opts, ListStackOpts)
if isinstance(list_, tensor_array_ops.TensorArray):
return _tf_tensorarray_stack(list_)
elif tensor_util.is_tensor(list_):
if list_.dtype == dtypes.variant:
return _tf_tensor_list_stack(list_, opts)
else:
# No-op for primitive Tensor arguments.
return list_
else:
return _py_list_stack(list_, opts)
def _tf_tensorarray_stack(list_):
"""Overload of list_stack that stages a TensorArray stack."""
return list_.stack()
def _tf_tensor_list_stack(list_, opts):
"""Overload of list_stack that stages a Tensor list write."""
if opts.element_dtype is None:
raise ValueError('cannot stack a list without knowing its element type;'
' use set_element_type to annotate it')
return list_ops.tensor_list_stack(list_, element_dtype=opts.element_dtype)
def _py_list_stack(list_, opts):
"""Overload of list_stack that executes a Python list append."""
# Revert to the original call.
return opts.original_call(list_)
| apache-2.0 | -5,471,634,621,851,008,000 | 32.514793 | 80 | 0.680879 | false |
m-kuhn/QGIS | tests/src/python/test_qgsfieldmodel.py | 17 | 11940 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsFieldModel
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '14/11/2016'
__copyright__ = 'Copyright 2016, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import qgis # NOQA
from qgis.core import (QgsField,
QgsFields,
QgsVectorLayer,
QgsFieldModel)
from qgis.PyQt.QtCore import QVariant, Qt
from qgis.testing import start_app, unittest
start_app()
def create_layer():
layer = QgsVectorLayer("Point?field=fldtxt:string&field=fldint:integer",
"addfeat", "memory")
assert layer.isValid()
return layer
def create_model():
l = create_layer()
m = QgsFieldModel()
m.setLayer(l)
return l, m
class TestQgsFieldModel(unittest.TestCase):
def testGettersSetters(self):
""" test model getters/setters """
l = create_layer()
m = QgsFieldModel()
self.assertFalse(m.layer())
m.setLayer(l)
self.assertEqual(m.layer(), l)
m.setAllowExpression(True)
self.assertTrue(m.allowExpression())
m.setAllowExpression(False)
self.assertFalse(m.allowExpression())
m.setAllowEmptyFieldName(True)
self.assertTrue(m.allowEmptyFieldName())
m.setAllowEmptyFieldName(False)
self.assertFalse(m.allowEmptyFieldName())
def testIndexFromName(self):
l, m = create_model()
i = m.indexFromName('fldtxt')
self.assertTrue(i.isValid())
self.assertEqual(i.row(), 0)
i = m.indexFromName('fldint')
self.assertTrue(i.isValid())
self.assertEqual(i.row(), 1)
i = m.indexFromName('not a field')
self.assertFalse(i.isValid())
# test with alias
i = m.indexFromName('text field')
self.assertFalse(i.isValid())
l.setFieldAlias(0, 'text field')
i = m.indexFromName('text field')
self.assertTrue(i.isValid())
self.assertEqual(i.row(), 0)
i = m.indexFromName('int field')
self.assertFalse(i.isValid())
l.setFieldAlias(1, 'int field')
i = m.indexFromName('int field')
self.assertTrue(i.isValid())
self.assertEqual(i.row(), 1)
# should be case insensitive
i = m.indexFromName('FLDTXT')
self.assertTrue(i.isValid())
self.assertEqual(i.row(), 0)
i = m.indexFromName('FLDINT')
self.assertTrue(i.isValid())
self.assertEqual(i.row(), 1)
# try with expression
m.setAllowExpression(True)
i = m.indexFromName('not a field')
# still not valid - needs expression set first
self.assertFalse(i.isValid())
m.setExpression('not a field')
i = m.indexFromName('not a field')
self.assertTrue(i.isValid())
self.assertEqual(i.row(), 2)
# try with null
i = m.indexFromName(None)
self.assertFalse(i.isValid())
m.setAllowEmptyFieldName(True)
i = m.indexFromName(None)
self.assertTrue(i.isValid())
self.assertEqual(i.row(), 0)
# when null is shown, all other rows should be offset
self.assertEqual(m.indexFromName('fldtxt').row(), 1)
self.assertEqual(m.indexFromName('fldint').row(), 2)
self.assertEqual(m.indexFromName('not a field').row(), 3)
self.assertEqual(m.indexFromName('FLDTXT').row(), 1)
self.assertEqual(m.indexFromName('FLDINT').row(), 2)
def testIsField(self):
l, m = create_model()
self.assertTrue(m.isField('fldtxt'))
self.assertTrue(m.isField('fldint'))
self.assertFalse(m.isField(None))
self.assertFalse(m.isField('an expression'))
def testRowCount(self):
l, m = create_model()
self.assertEqual(m.rowCount(), 2)
m.setAllowEmptyFieldName(True)
self.assertEqual(m.rowCount(), 3)
m.setAllowExpression(True)
m.setExpression('not a field')
self.assertEqual(m.rowCount(), 4)
m.setExpression('not a field')
self.assertEqual(m.rowCount(), 4)
m.setExpression('not a field 2')
self.assertEqual(m.rowCount(), 4)
m.removeExpression()
self.assertEqual(m.rowCount(), 3)
def testFieldNameRole(self):
l, m = create_model()
self.assertEqual(m.data(m.indexFromName('fldtxt'), QgsFieldModel.FieldNameRole), 'fldtxt')
self.assertEqual(m.data(m.indexFromName('fldint'), QgsFieldModel.FieldNameRole), 'fldint')
self.assertFalse(m.data(m.indexFromName('an expression'), QgsFieldModel.FieldNameRole))
self.assertFalse(m.data(m.indexFromName(None), QgsFieldModel.FieldNameRole))
m.setAllowExpression(True)
m.setExpression('an expression')
self.assertFalse(m.data(m.indexFromName('an expression'), QgsFieldModel.FieldNameRole))
m.setAllowEmptyFieldName(True)
self.assertFalse(m.data(m.indexFromName(None), QgsFieldModel.FieldNameRole))
def testExpressionRole(self):
l, m = create_model()
self.assertEqual(m.data(m.indexFromName('fldtxt'), QgsFieldModel.ExpressionRole), 'fldtxt')
self.assertEqual(m.data(m.indexFromName('fldint'), QgsFieldModel.ExpressionRole), 'fldint')
self.assertFalse(m.data(m.indexFromName('an expression'), QgsFieldModel.ExpressionRole))
self.assertFalse(m.data(m.indexFromName(None), QgsFieldModel.ExpressionRole))
m.setAllowExpression(True)
m.setExpression('an expression')
self.assertEqual(m.data(m.indexFromName('an expression'), QgsFieldModel.ExpressionRole), 'an expression')
m.setAllowEmptyFieldName(True)
self.assertFalse(m.data(m.indexFromName(None), QgsFieldModel.ExpressionRole))
def testFieldIndexRole(self):
l, m = create_model()
self.assertEqual(m.data(m.indexFromName('fldtxt'), QgsFieldModel.FieldIndexRole), 0)
self.assertEqual(m.data(m.indexFromName('fldint'), QgsFieldModel.FieldIndexRole), 1)
self.assertFalse(m.data(m.indexFromName('an expression'), QgsFieldModel.FieldIndexRole))
self.assertFalse(m.data(m.indexFromName(None), QgsFieldModel.FieldIndexRole))
m.setAllowExpression(True)
m.setExpression('an expression')
self.assertFalse(m.data(m.indexFromName('an expression'), QgsFieldModel.FieldIndexRole))
m.setAllowEmptyFieldName(True)
self.assertFalse(m.data(m.indexFromName(None), QgsFieldModel.FieldIndexRole))
def testIsExpressionRole(self):
l, m = create_model()
self.assertFalse(m.data(m.indexFromName('fldtxt'), QgsFieldModel.IsExpressionRole))
self.assertFalse(m.data(m.indexFromName('fldint'), QgsFieldModel.IsExpressionRole))
self.assertFalse(m.data(m.indexFromName('an expression'), QgsFieldModel.IsExpressionRole))
self.assertFalse(m.data(m.indexFromName(None), QgsFieldModel.IsExpressionRole))
m.setAllowExpression(True)
m.setExpression('an expression')
self.assertTrue(m.data(m.indexFromName('an expression'), QgsFieldModel.IsExpressionRole))
m.setAllowEmptyFieldName(True)
self.assertFalse(m.data(m.indexFromName(None), QgsFieldModel.IsExpressionRole))
def testExpressionValidityRole(self):
l, m = create_model()
self.assertTrue(m.data(m.indexFromName('fldtxt'), QgsFieldModel.ExpressionValidityRole))
self.assertTrue(m.data(m.indexFromName('fldint'), QgsFieldModel.ExpressionValidityRole))
self.assertFalse(m.data(m.indexFromName('an expression'), QgsFieldModel.ExpressionValidityRole))
self.assertFalse(m.data(m.indexFromName(None), QgsFieldModel.ExpressionValidityRole))
m.setAllowExpression(True)
m.setExpression('an expression')
self.assertFalse(m.data(m.indexFromName('an expression'), QgsFieldModel.ExpressionValidityRole))
m.setAllowEmptyFieldName(True)
self.assertTrue(m.data(m.indexFromName(None), QgsFieldModel.ExpressionValidityRole))
def testFieldTypeRole(self):
l, m = create_model()
self.assertEqual(m.data(m.indexFromName('fldtxt'), QgsFieldModel.FieldTypeRole), QVariant.String)
self.assertEqual(m.data(m.indexFromName('fldint'), QgsFieldModel.FieldTypeRole), QVariant.Int)
self.assertFalse(m.data(m.indexFromName('an expression'), QgsFieldModel.FieldTypeRole))
self.assertFalse(m.data(m.indexFromName(None), QgsFieldModel.FieldTypeRole))
m.setAllowExpression(True)
m.setExpression('an expression')
self.assertFalse(m.data(m.indexFromName('an expression'), QgsFieldModel.FieldTypeRole))
m.setAllowEmptyFieldName(True)
self.assertFalse(m.data(m.indexFromName(None), QgsFieldModel.FieldTypeRole))
def testFieldOriginRole(self):
l, m = create_model()
self.assertEqual(m.data(m.indexFromName('fldtxt'), QgsFieldModel.FieldOriginRole), QgsFields.OriginProvider)
self.assertEqual(m.data(m.indexFromName('fldint'), QgsFieldModel.FieldOriginRole), QgsFields.OriginProvider)
self.assertFalse(m.data(m.indexFromName('an expression'), QgsFieldModel.FieldOriginRole))
self.assertFalse(m.data(m.indexFromName(None), QgsFieldModel.FieldOriginRole))
m.setAllowExpression(True)
m.setExpression('an expression')
self.assertFalse(m.data(m.indexFromName('an expression'), QgsFieldModel.FieldOriginRole))
m.setAllowEmptyFieldName(True)
self.assertFalse(m.data(m.indexFromName(None), QgsFieldModel.FieldOriginRole))
def testIsEmptyRole(self):
l, m = create_model()
        self.assertFalse(m.data(m.indexFromName('fldtxt'), QgsFieldModel.IsEmptyRole))
        self.assertFalse(m.data(m.indexFromName('fldint'), QgsFieldModel.IsEmptyRole))
self.assertFalse(m.data(m.indexFromName('an expression'), QgsFieldModel.IsEmptyRole))
self.assertFalse(m.data(m.indexFromName(None), QgsFieldModel.IsEmptyRole))
m.setAllowExpression(True)
m.setExpression('an expression')
self.assertFalse(m.data(m.indexFromName('an expression'), QgsFieldModel.IsEmptyRole))
m.setAllowEmptyFieldName(True)
self.assertTrue(m.data(m.indexFromName(None), QgsFieldModel.IsEmptyRole))
def testDisplayRole(self):
l, m = create_model()
self.assertEqual(m.data(m.indexFromName('fldtxt'), Qt.DisplayRole), 'fldtxt')
self.assertEqual(m.data(m.indexFromName('fldint'), Qt.DisplayRole), 'fldint')
self.assertFalse(m.data(m.indexFromName('an expression'), Qt.DisplayRole))
self.assertFalse(m.data(m.indexFromName(None), Qt.DisplayRole))
m.setAllowExpression(True)
m.setExpression('an expression')
self.assertEqual(m.data(m.indexFromName('an expression'), Qt.DisplayRole), 'an expression')
m.setAllowEmptyFieldName(True)
self.assertFalse(m.data(m.indexFromName(None), Qt.DisplayRole))
def testFieldTooltip(self):
f = QgsField('my_string', QVariant.String, 'string')
self.assertEqual(QgsFieldModel.fieldToolTip(f), '<b>my_string</b><p>string</p>')
f.setAlias('my alias')
self.assertEqual(QgsFieldModel.fieldToolTip(f), '<b>my alias</b> (my_string)<p>string</p>')
f.setLength(20)
self.assertEqual(QgsFieldModel.fieldToolTip(f), '<b>my alias</b> (my_string)<p>string (20)</p>')
f = QgsField('my_real', QVariant.Double, 'real', 8, 3)
self.assertEqual(QgsFieldModel.fieldToolTip(f), '<b>my_real</b><p>real (8, 3)</p>')
if __name__ == '__main__':
unittest.main()
| gpl-2.0 | 272,726,115,616,627,330 | 44.572519 | 116 | 0.672864 | false |
awd4/spnss | dag.py | 1 | 1293 | import argparse
import logging
import time
import learn
import spnss.knobs as knobs
import spnss.ss as ss
def run(name, t, cp=None, verbosity=1):
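    # name: dataset name understood by learn.load_data(); t: threshold passed
    # to ss.search_spn(); cp: optional cluster penalty copied into
    # knobs.cluster_penalty; verbosity: 0-3, mapped onto a logging level below.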
    verbosity = max(0, min(3, verbosity))  # clamp so out-of-range values cannot raise KeyError
    level = {0: logging.ERROR, 1: logging.WARNING, 2: logging.INFO, 3: logging.DEBUG}[verbosity]
logging.basicConfig(level=level)
# learn SPN
if cp is not None: knobs.cluster_penalty = cp
knobs.min_instances = 2
trn, vld, tst, schema = learn.load_data(name)
start = time.time()
net = ss.search_spn(learn.seed_network(trn, schema), trn, vld, t, 200)
#learn.smooth_network(net, vld, verbosity>0)
tst_llh = net.llh(tst)
vld_llh = net.llh(vld)
print name, '\tt: %.5f'%t, '\tcp: ', cp, '\ttime:%.1f'%(time.time()-start), '\tdag ', len(net.pot), 'va:%8.4f'%vld_llh, 'te:%8.4f'%tst_llh
return net, vld_llh, tst_llh
def main():
# parse command line arguments
parent_parsers = [learn.dataset_parser(), learn.t_cp_parser(), learn.vq_parser(), learn.io_parser()]
parser = argparse.ArgumentParser(description='Perform SPN structure search', parents=parent_parsers)
args = parser.parse_args()
import random
# random.seed(30)
verbosity = args.v - args.q
run(args.dataset, args.threshold, args.cluster_penalty, verbosity=verbosity)
if __name__ == '__main__':
main()
| mit | 2,819,069,031,912,137,700 | 27.108696 | 142 | 0.656613 | false |
maartenq/ansible | test/units/modules/network/f5/test_bigip_firewall_dos_profile.py | 10 | 3653 | # -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
from nose.plugins.skip import SkipTest
if sys.version_info < (2, 7):
raise SkipTest("F5 Ansible modules require Python >= 2.7")
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import Mock
from ansible.compat.tests.mock import patch
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_firewall_dos_profile import ApiParameters
from library.modules.bigip_firewall_dos_profile import ModuleParameters
from library.modules.bigip_firewall_dos_profile import ModuleManager
from library.modules.bigip_firewall_dos_profile import ArgumentSpec
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import iControlUnexpectedHTTPError
from test.unit.modules.utils import set_module_args
except ImportError:
try:
from ansible.modules.network.f5.bigip_firewall_dos_profile import ApiParameters
from ansible.modules.network.f5.bigip_firewall_dos_profile import ModuleParameters
from ansible.modules.network.f5.bigip_firewall_dos_profile import ModuleManager
from ansible.modules.network.f5.bigip_firewall_dos_profile import ArgumentSpec
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError
from units.modules.utils import set_module_args
except ImportError:
raise SkipTest("F5 Ansible modules require the f5-sdk Python library")
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
class TestParameters(unittest.TestCase):
def test_module_parameters(self):
args = dict(
name='foo',
description='my description',
threshold_sensitivity='low',
default_whitelist='whitelist1'
)
p = ModuleParameters(params=args)
assert p.name == 'foo'
assert p.description == 'my description'
assert p.threshold_sensitivity == 'low'
assert p.default_whitelist == '/Common/whitelist1'
class TestManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
def test_create(self, *args):
set_module_args(dict(
name='foo',
description='this is a description',
threshold_sensitivity='low',
default_whitelist='whitelist1',
password='password',
server='localhost',
user='admin'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
mm = ModuleManager(module=module)
# Override methods to force specific logic in the module to happen
mm.exists = Mock(side_effect=[False, True])
mm.create_on_device = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
assert results['description'] == 'this is a description'
| gpl-3.0 | -8,748,369,577,308,399,000 | 32.209091 | 91 | 0.681905 | false |
Jorge-Rodriguez/ansible | lib/ansible/module_utils/common/process.py | 73 | 1504 | # Copyright (c) 2018, Ansible Project
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible.module_utils.common.file import is_executable
def get_bin_path(arg, required=False, opt_dirs=None):
'''
find system executable in PATH.
Optional arguments:
    - required:  if the executable is not found and required is true, a ValueError is raised
- opt_dirs: optional list of directories to search in addition to PATH
if found return full path; otherwise return None
'''
opt_dirs = [] if opt_dirs is None else opt_dirs
sbin_paths = ['/sbin', '/usr/sbin', '/usr/local/sbin']
paths = []
for d in opt_dirs:
if d is not None and os.path.exists(d):
paths.append(d)
paths += os.environ.get('PATH', '').split(os.pathsep)
bin_path = None
# mangle PATH to include /sbin dirs
for p in sbin_paths:
if p not in paths and os.path.exists(p):
paths.append(p)
for d in paths:
if not d:
continue
path = os.path.join(d, arg)
if os.path.exists(path) and not os.path.isdir(path) and is_executable(path):
bin_path = path
break
if required and bin_path is None:
raise ValueError('Failed to find required executable %s in paths: %s' % (arg, os.pathsep.join(paths)))
return bin_path
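# Hedged usage sketch (not part of the original module; the binary names are only examples):
#
#   ssh_path = get_bin_path('ssh', opt_dirs=['/opt/local/bin'])
#   # -> absolute path string, or None when 'ssh' is not found (required defaults to False)
#   tar_path = get_bin_path('tar', required=True)
#   # -> raises ValueError when 'tar' cannot be located in PATH or the extra sbin dirs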
| gpl-3.0 | -2,400,948,177,671,686,700 | 33.976744 | 110 | 0.641622 | false |
laayis/yowsup | yowsup/layers/protocol_messages/protocolentities/message_text.py | 59 | 1450 | from yowsup.structs import ProtocolEntity, ProtocolTreeNode
from .message import MessageProtocolEntity
class TextMessageProtocolEntity(MessageProtocolEntity):
'''
<message t="{{TIME_STAMP}}" from="{{CONTACT_JID}}"
offline="{{OFFLINE}}" type="text" id="{{MESSAGE_ID}}" notify="{{NOTIFY_NAME}}">
<body>
{{MESSAGE_DATA}}
</body>
</message>
'''
def __init__(self, body, _id = None, _from = None, to = None, notify = None,
timestamp = None, participant = None, offline = None, retry = None):
super(TextMessageProtocolEntity, self).__init__("text",_id, _from, to, notify, timestamp, participant, offline, retry)
self.setBody(body)
def __str__(self):
out = super(TextMessageProtocolEntity, self).__str__()
out += "Body: %s\n" % self.body
return out
def setBody(self, body):
self.body = body
def getBody(self):
return self.body
def toProtocolTreeNode(self):
node = super(TextMessageProtocolEntity, self).toProtocolTreeNode()
bodyNode = ProtocolTreeNode("body", {}, None, self.body)
node.addChild(bodyNode)
return node
@staticmethod
def fromProtocolTreeNode(node):
entity = MessageProtocolEntity.fromProtocolTreeNode(node)
entity.__class__ = TextMessageProtocolEntity
entity.setBody(node.getChild("body").getData())
return entity
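# Hedged usage sketch (not part of the original module; the JID is made up and the
# message id / timestamp defaults are handled by the parent MessageProtocolEntity):
#
#   entity = TextMessageProtocolEntity("hello", to="[email protected]")
#   node = entity.toProtocolTreeNode()   # <message type="text" ...><body>hello</body></message>
#   same = TextMessageProtocolEntity.fromProtocolTreeNode(node)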
| gpl-3.0 | 8,283,155,729,518,271,000 | 36.179487 | 126 | 0.622759 | false |
DGrady/pandas | pandas/tests/scalar/test_timedelta.py | 3 | 27307 | """ test the scalar Timedelta """
import pytest
import numpy as np
from datetime import timedelta
import pandas as pd
import pandas.util.testing as tm
from pandas.core.tools.timedeltas import _coerce_scalar_to_timedelta_type as ct
from pandas import (Timedelta, TimedeltaIndex, timedelta_range, Series,
to_timedelta, compat)
from pandas._libs.tslib import iNaT, NaTType
class TestTimedeltas(object):
_multiprocess_can_split_ = True
def setup_method(self, method):
pass
def test_construction(self):
expected = np.timedelta64(10, 'D').astype('m8[ns]').view('i8')
assert Timedelta(10, unit='d').value == expected
assert Timedelta(10.0, unit='d').value == expected
assert Timedelta('10 days').value == expected
assert Timedelta(days=10).value == expected
assert Timedelta(days=10.0).value == expected
expected += np.timedelta64(10, 's').astype('m8[ns]').view('i8')
assert Timedelta('10 days 00:00:10').value == expected
assert Timedelta(days=10, seconds=10).value == expected
assert Timedelta(days=10, milliseconds=10 * 1000).value == expected
assert (Timedelta(days=10, microseconds=10 * 1000 * 1000)
.value == expected)
# gh-8757: test construction with np dtypes
timedelta_kwargs = {'days': 'D',
'seconds': 's',
'microseconds': 'us',
'milliseconds': 'ms',
'minutes': 'm',
'hours': 'h',
'weeks': 'W'}
npdtypes = [np.int64, np.int32, np.int16, np.float64, np.float32,
np.float16]
for npdtype in npdtypes:
for pykwarg, npkwarg in timedelta_kwargs.items():
expected = np.timedelta64(1, npkwarg).astype(
'm8[ns]').view('i8')
assert Timedelta(**{pykwarg: npdtype(1)}).value == expected
# rounding cases
assert Timedelta(82739999850000).value == 82739999850000
assert ('0 days 22:58:59.999850' in str(Timedelta(82739999850000)))
assert Timedelta(123072001000000).value == 123072001000000
assert ('1 days 10:11:12.001' in str(Timedelta(123072001000000)))
# string conversion with/without leading zero
# GH 9570
assert Timedelta('0:00:00') == timedelta(hours=0)
assert Timedelta('00:00:00') == timedelta(hours=0)
assert Timedelta('-1:00:00') == -timedelta(hours=1)
assert Timedelta('-01:00:00') == -timedelta(hours=1)
# more strings & abbrevs
# GH 8190
assert Timedelta('1 h') == timedelta(hours=1)
assert Timedelta('1 hour') == timedelta(hours=1)
assert Timedelta('1 hr') == timedelta(hours=1)
assert Timedelta('1 hours') == timedelta(hours=1)
assert Timedelta('-1 hours') == -timedelta(hours=1)
assert Timedelta('1 m') == timedelta(minutes=1)
assert Timedelta('1.5 m') == timedelta(seconds=90)
assert Timedelta('1 minute') == timedelta(minutes=1)
assert Timedelta('1 minutes') == timedelta(minutes=1)
assert Timedelta('1 s') == timedelta(seconds=1)
assert Timedelta('1 second') == timedelta(seconds=1)
assert Timedelta('1 seconds') == timedelta(seconds=1)
assert Timedelta('1 ms') == timedelta(milliseconds=1)
assert Timedelta('1 milli') == timedelta(milliseconds=1)
assert Timedelta('1 millisecond') == timedelta(milliseconds=1)
assert Timedelta('1 us') == timedelta(microseconds=1)
assert Timedelta('1 micros') == timedelta(microseconds=1)
assert Timedelta('1 microsecond') == timedelta(microseconds=1)
assert Timedelta('1.5 microsecond') == Timedelta('00:00:00.000001500')
assert Timedelta('1 ns') == Timedelta('00:00:00.000000001')
assert Timedelta('1 nano') == Timedelta('00:00:00.000000001')
assert Timedelta('1 nanosecond') == Timedelta('00:00:00.000000001')
# combos
assert Timedelta('10 days 1 hour') == timedelta(days=10, hours=1)
assert Timedelta('10 days 1 h') == timedelta(days=10, hours=1)
assert Timedelta('10 days 1 h 1m 1s') == timedelta(
days=10, hours=1, minutes=1, seconds=1)
assert Timedelta('-10 days 1 h 1m 1s') == -timedelta(
days=10, hours=1, minutes=1, seconds=1)
assert Timedelta('-10 days 1 h 1m 1s') == -timedelta(
days=10, hours=1, minutes=1, seconds=1)
assert Timedelta('-10 days 1 h 1m 1s 3us') == -timedelta(
days=10, hours=1, minutes=1, seconds=1, microseconds=3)
        assert Timedelta('-10 days 1 h 1.5m 1s 3us') == -timedelta(
days=10, hours=1, minutes=1, seconds=31, microseconds=3)
# Currently invalid as it has a - on the hh:mm:dd part
# (only allowed on the days)
pytest.raises(ValueError,
lambda: Timedelta('-10 days -1 h 1.5m 1s 3us'))
# only leading neg signs are allowed
pytest.raises(ValueError,
lambda: Timedelta('10 days -1 h 1.5m 1s 3us'))
# no units specified
pytest.raises(ValueError, lambda: Timedelta('3.1415'))
# invalid construction
tm.assert_raises_regex(ValueError, "cannot construct a Timedelta",
lambda: Timedelta())
tm.assert_raises_regex(ValueError,
"unit abbreviation w/o a number",
lambda: Timedelta('foo'))
tm.assert_raises_regex(ValueError,
"cannot construct a Timedelta from the "
"passed arguments, allowed keywords are ",
lambda: Timedelta(day=10))
# round-trip both for string and value
for v in ['1s', '-1s', '1us', '-1us', '1 day', '-1 day',
'-23:59:59.999999', '-1 days +23:59:59.999999', '-1ns',
'1ns', '-23:59:59.999999999']:
td = Timedelta(v)
assert Timedelta(td.value) == td
# str does not normally display nanos
if not td.nanoseconds:
assert Timedelta(str(td)) == td
assert Timedelta(td._repr_base(format='all')) == td
# floats
expected = np.timedelta64(
10, 's').astype('m8[ns]').view('i8') + np.timedelta64(
500, 'ms').astype('m8[ns]').view('i8')
assert Timedelta(10.5, unit='s').value == expected
# offset
assert (to_timedelta(pd.offsets.Hour(2)) ==
Timedelta('0 days, 02:00:00'))
assert (Timedelta(pd.offsets.Hour(2)) ==
Timedelta('0 days, 02:00:00'))
assert (Timedelta(pd.offsets.Second(2)) ==
Timedelta('0 days, 00:00:02'))
# gh-11995: unicode
expected = Timedelta('1H')
result = pd.Timedelta(u'1H')
assert result == expected
assert (to_timedelta(pd.offsets.Hour(2)) ==
Timedelta(u'0 days, 02:00:00'))
pytest.raises(ValueError, lambda: Timedelta(u'foo bar'))
def test_overflow_on_construction(self):
# xref https://github.com/statsmodels/statsmodels/issues/3374
value = pd.Timedelta('1day').value * 20169940
pytest.raises(OverflowError, pd.Timedelta, value)
def test_total_seconds_scalar(self):
# see gh-10939
rng = Timedelta('1 days, 10:11:12.100123456')
expt = 1 * 86400 + 10 * 3600 + 11 * 60 + 12 + 100123456. / 1e9
tm.assert_almost_equal(rng.total_seconds(), expt)
rng = Timedelta(np.nan)
assert np.isnan(rng.total_seconds())
def test_repr(self):
assert (repr(Timedelta(10, unit='d')) ==
"Timedelta('10 days 00:00:00')")
assert (repr(Timedelta(10, unit='s')) ==
"Timedelta('0 days 00:00:10')")
assert (repr(Timedelta(10, unit='ms')) ==
"Timedelta('0 days 00:00:00.010000')")
assert (repr(Timedelta(-10, unit='ms')) ==
"Timedelta('-1 days +23:59:59.990000')")
def test_conversion(self):
for td in [Timedelta(10, unit='d'),
Timedelta('1 days, 10:11:12.012345')]:
pydt = td.to_pytimedelta()
assert td == Timedelta(pydt)
assert td == pydt
assert (isinstance(pydt, timedelta) and not isinstance(
pydt, Timedelta))
assert td == np.timedelta64(td.value, 'ns')
td64 = td.to_timedelta64()
assert td64 == np.timedelta64(td.value, 'ns')
assert td == td64
assert isinstance(td64, np.timedelta64)
        # this is NOT equal and cannot be round-tripped (because of the nanos)
td = Timedelta('1 days, 10:11:12.012345678')
assert td != td.to_pytimedelta()
def test_freq_conversion(self):
# truediv
td = Timedelta('1 days 2 hours 3 ns')
result = td / np.timedelta64(1, 'D')
assert result == td.value / float(86400 * 1e9)
result = td / np.timedelta64(1, 's')
assert result == td.value / float(1e9)
result = td / np.timedelta64(1, 'ns')
assert result == td.value
# floordiv
td = Timedelta('1 days 2 hours 3 ns')
result = td // np.timedelta64(1, 'D')
assert result == 1
result = td // np.timedelta64(1, 's')
assert result == 93600
result = td // np.timedelta64(1, 'ns')
assert result == td.value
def test_fields(self):
def check(value):
# that we are int/long like
assert isinstance(value, (int, compat.long))
# compat to datetime.timedelta
rng = to_timedelta('1 days, 10:11:12')
assert rng.days == 1
assert rng.seconds == 10 * 3600 + 11 * 60 + 12
assert rng.microseconds == 0
assert rng.nanoseconds == 0
pytest.raises(AttributeError, lambda: rng.hours)
pytest.raises(AttributeError, lambda: rng.minutes)
pytest.raises(AttributeError, lambda: rng.milliseconds)
# GH 10050
check(rng.days)
check(rng.seconds)
check(rng.microseconds)
check(rng.nanoseconds)
td = Timedelta('-1 days, 10:11:12')
assert abs(td) == Timedelta('13:48:48')
assert str(td) == "-1 days +10:11:12"
assert -td == Timedelta('0 days 13:48:48')
assert -Timedelta('-1 days, 10:11:12').value == 49728000000000
assert Timedelta('-1 days, 10:11:12').value == -49728000000000
rng = to_timedelta('-1 days, 10:11:12.100123456')
assert rng.days == -1
assert rng.seconds == 10 * 3600 + 11 * 60 + 12
assert rng.microseconds == 100 * 1000 + 123
assert rng.nanoseconds == 456
pytest.raises(AttributeError, lambda: rng.hours)
pytest.raises(AttributeError, lambda: rng.minutes)
pytest.raises(AttributeError, lambda: rng.milliseconds)
# components
tup = pd.to_timedelta(-1, 'us').components
assert tup.days == -1
assert tup.hours == 23
assert tup.minutes == 59
assert tup.seconds == 59
assert tup.milliseconds == 999
assert tup.microseconds == 999
assert tup.nanoseconds == 0
# GH 10050
check(tup.days)
check(tup.hours)
check(tup.minutes)
check(tup.seconds)
check(tup.milliseconds)
check(tup.microseconds)
check(tup.nanoseconds)
tup = Timedelta('-1 days 1 us').components
assert tup.days == -2
assert tup.hours == 23
assert tup.minutes == 59
assert tup.seconds == 59
assert tup.milliseconds == 999
assert tup.microseconds == 999
assert tup.nanoseconds == 0
def test_nat_converters(self):
assert to_timedelta('nat', box=False).astype('int64') == iNaT
assert to_timedelta('nan', box=False).astype('int64') == iNaT
def testit(unit, transform):
# array
result = to_timedelta(np.arange(5), unit=unit)
expected = TimedeltaIndex([np.timedelta64(i, transform(unit))
for i in np.arange(5).tolist()])
tm.assert_index_equal(result, expected)
# scalar
result = to_timedelta(2, unit=unit)
expected = Timedelta(np.timedelta64(2, transform(unit)).astype(
'timedelta64[ns]'))
assert result == expected
# validate all units
# GH 6855
for unit in ['Y', 'M', 'W', 'D', 'y', 'w', 'd']:
testit(unit, lambda x: x.upper())
for unit in ['days', 'day', 'Day', 'Days']:
testit(unit, lambda x: 'D')
for unit in ['h', 'm', 's', 'ms', 'us', 'ns', 'H', 'S', 'MS', 'US',
'NS']:
testit(unit, lambda x: x.lower())
# offsets
# m
testit('T', lambda x: 'm')
# ms
testit('L', lambda x: 'ms')
def test_numeric_conversions(self):
assert ct(0) == np.timedelta64(0, 'ns')
assert ct(10) == np.timedelta64(10, 'ns')
assert ct(10, unit='ns') == np.timedelta64(10, 'ns').astype('m8[ns]')
assert ct(10, unit='us') == np.timedelta64(10, 'us').astype('m8[ns]')
assert ct(10, unit='ms') == np.timedelta64(10, 'ms').astype('m8[ns]')
assert ct(10, unit='s') == np.timedelta64(10, 's').astype('m8[ns]')
assert ct(10, unit='d') == np.timedelta64(10, 'D').astype('m8[ns]')
def test_timedelta_conversions(self):
assert (ct(timedelta(seconds=1)) ==
np.timedelta64(1, 's').astype('m8[ns]'))
assert (ct(timedelta(microseconds=1)) ==
np.timedelta64(1, 'us').astype('m8[ns]'))
assert (ct(timedelta(days=1)) ==
np.timedelta64(1, 'D').astype('m8[ns]'))
def test_round(self):
t1 = Timedelta('1 days 02:34:56.789123456')
t2 = Timedelta('-1 days 02:34:56.789123456')
for (freq, s1, s2) in [('N', t1, t2),
('U', Timedelta('1 days 02:34:56.789123000'),
Timedelta('-1 days 02:34:56.789123000')),
('L', Timedelta('1 days 02:34:56.789000000'),
Timedelta('-1 days 02:34:56.789000000')),
('S', Timedelta('1 days 02:34:57'),
Timedelta('-1 days 02:34:57')),
('2S', Timedelta('1 days 02:34:56'),
Timedelta('-1 days 02:34:56')),
('5S', Timedelta('1 days 02:34:55'),
Timedelta('-1 days 02:34:55')),
('T', Timedelta('1 days 02:35:00'),
Timedelta('-1 days 02:35:00')),
('12T', Timedelta('1 days 02:36:00'),
Timedelta('-1 days 02:36:00')),
('H', Timedelta('1 days 03:00:00'),
Timedelta('-1 days 03:00:00')),
('d', Timedelta('1 days'),
Timedelta('-1 days'))]:
r1 = t1.round(freq)
assert r1 == s1
r2 = t2.round(freq)
assert r2 == s2
# invalid
for freq in ['Y', 'M', 'foobar']:
pytest.raises(ValueError, lambda: t1.round(freq))
t1 = timedelta_range('1 days', periods=3, freq='1 min 2 s 3 us')
t2 = -1 * t1
t1a = timedelta_range('1 days', periods=3, freq='1 min 2 s')
t1c = pd.TimedeltaIndex([1, 1, 1], unit='D')
# note that negative times round DOWN! so don't give whole numbers
for (freq, s1, s2) in [('N', t1, t2),
('U', t1, t2),
('L', t1a,
TimedeltaIndex(['-1 days +00:00:00',
'-2 days +23:58:58',
'-2 days +23:57:56'],
dtype='timedelta64[ns]',
freq=None)
),
('S', t1a,
TimedeltaIndex(['-1 days +00:00:00',
'-2 days +23:58:58',
'-2 days +23:57:56'],
dtype='timedelta64[ns]',
freq=None)
),
('12T', t1c,
TimedeltaIndex(['-1 days',
'-1 days',
'-1 days'],
dtype='timedelta64[ns]',
freq=None)
),
('H', t1c,
TimedeltaIndex(['-1 days',
'-1 days',
'-1 days'],
dtype='timedelta64[ns]',
freq=None)
),
('d', t1c,
pd.TimedeltaIndex([-1, -1, -1], unit='D')
)]:
r1 = t1.round(freq)
tm.assert_index_equal(r1, s1)
r2 = t2.round(freq)
tm.assert_index_equal(r2, s2)
# invalid
for freq in ['Y', 'M', 'foobar']:
pytest.raises(ValueError, lambda: t1.round(freq))
def test_contains(self):
# Checking for any NaT-like objects
# GH 13603
td = to_timedelta(range(5), unit='d') + pd.offsets.Hour(1)
for v in [pd.NaT, None, float('nan'), np.nan]:
assert not (v in td)
td = to_timedelta([pd.NaT])
for v in [pd.NaT, None, float('nan'), np.nan]:
assert (v in td)
def test_identity(self):
td = Timedelta(10, unit='d')
assert isinstance(td, Timedelta)
assert isinstance(td, timedelta)
def test_short_format_converters(self):
def conv(v):
return v.astype('m8[ns]')
assert ct('10') == np.timedelta64(10, 'ns')
assert ct('10ns') == np.timedelta64(10, 'ns')
assert ct('100') == np.timedelta64(100, 'ns')
assert ct('100ns') == np.timedelta64(100, 'ns')
assert ct('1000') == np.timedelta64(1000, 'ns')
assert ct('1000ns') == np.timedelta64(1000, 'ns')
assert ct('1000NS') == np.timedelta64(1000, 'ns')
assert ct('10us') == np.timedelta64(10000, 'ns')
assert ct('100us') == np.timedelta64(100000, 'ns')
assert ct('1000us') == np.timedelta64(1000000, 'ns')
assert ct('1000Us') == np.timedelta64(1000000, 'ns')
assert ct('1000uS') == np.timedelta64(1000000, 'ns')
assert ct('1ms') == np.timedelta64(1000000, 'ns')
assert ct('10ms') == np.timedelta64(10000000, 'ns')
assert ct('100ms') == np.timedelta64(100000000, 'ns')
assert ct('1000ms') == np.timedelta64(1000000000, 'ns')
assert ct('-1s') == -np.timedelta64(1000000000, 'ns')
assert ct('1s') == np.timedelta64(1000000000, 'ns')
assert ct('10s') == np.timedelta64(10000000000, 'ns')
assert ct('100s') == np.timedelta64(100000000000, 'ns')
assert ct('1000s') == np.timedelta64(1000000000000, 'ns')
assert ct('1d') == conv(np.timedelta64(1, 'D'))
assert ct('-1d') == -conv(np.timedelta64(1, 'D'))
assert ct('1D') == conv(np.timedelta64(1, 'D'))
assert ct('10D') == conv(np.timedelta64(10, 'D'))
assert ct('100D') == conv(np.timedelta64(100, 'D'))
assert ct('1000D') == conv(np.timedelta64(1000, 'D'))
assert ct('10000D') == conv(np.timedelta64(10000, 'D'))
# space
assert ct(' 10000D ') == conv(np.timedelta64(10000, 'D'))
assert ct(' - 10000D ') == -conv(np.timedelta64(10000, 'D'))
# invalid
pytest.raises(ValueError, ct, '1foo')
pytest.raises(ValueError, ct, 'foo')
def test_full_format_converters(self):
def conv(v):
return v.astype('m8[ns]')
d1 = np.timedelta64(1, 'D')
assert ct('1days') == conv(d1)
assert ct('1days,') == conv(d1)
assert ct('- 1days,') == -conv(d1)
assert ct('00:00:01') == conv(np.timedelta64(1, 's'))
assert ct('06:00:01') == conv(np.timedelta64(6 * 3600 + 1, 's'))
assert ct('06:00:01.0') == conv(np.timedelta64(6 * 3600 + 1, 's'))
assert ct('06:00:01.01') == conv(np.timedelta64(
1000 * (6 * 3600 + 1) + 10, 'ms'))
assert (ct('- 1days, 00:00:01') ==
conv(-d1 + np.timedelta64(1, 's')))
assert (ct('1days, 06:00:01') ==
conv(d1 + np.timedelta64(6 * 3600 + 1, 's')))
assert (ct('1days, 06:00:01.01') ==
conv(d1 + np.timedelta64(1000 * (6 * 3600 + 1) + 10, 'ms')))
# invalid
pytest.raises(ValueError, ct, '- 1days, 00')
def test_overflow(self):
# GH 9442
s = Series(pd.date_range('20130101', periods=100000, freq='H'))
s[0] += pd.Timedelta('1s 1ms')
# mean
result = (s - s.min()).mean()
expected = pd.Timedelta((pd.DatetimeIndex((s - s.min())).asi8 / len(s)
).sum())
# the computation is converted to float so
# might be some loss of precision
assert np.allclose(result.value / 1000, expected.value / 1000)
# sum
pytest.raises(ValueError, lambda: (s - s.min()).sum())
s1 = s[0:10000]
pytest.raises(ValueError, lambda: (s1 - s1.min()).sum())
s2 = s[0:1000]
result = (s2 - s2.min()).sum()
def test_pickle(self):
v = Timedelta('1 days 10:11:12.0123456')
v_p = tm.round_trip_pickle(v)
assert v == v_p
def test_timedelta_hash_equality(self):
# GH 11129
v = Timedelta(1, 'D')
td = timedelta(days=1)
assert hash(v) == hash(td)
d = {td: 2}
assert d[v] == 2
tds = timedelta_range('1 second', periods=20)
assert all(hash(td) == hash(td.to_pytimedelta()) for td in tds)
# python timedeltas drop ns resolution
ns_td = Timedelta(1, 'ns')
assert hash(ns_td) != hash(ns_td.to_pytimedelta())
def test_implementation_limits(self):
min_td = Timedelta(Timedelta.min)
max_td = Timedelta(Timedelta.max)
# GH 12727
# timedelta limits correspond to int64 boundaries
assert min_td.value == np.iinfo(np.int64).min + 1
assert max_td.value == np.iinfo(np.int64).max
# Beyond lower limit, a NAT before the Overflow
assert isinstance(min_td - Timedelta(1, 'ns'), NaTType)
with pytest.raises(OverflowError):
min_td - Timedelta(2, 'ns')
with pytest.raises(OverflowError):
max_td + Timedelta(1, 'ns')
# Same tests using the internal nanosecond values
td = Timedelta(min_td.value - 1, 'ns')
assert isinstance(td, NaTType)
with pytest.raises(OverflowError):
Timedelta(min_td.value - 2, 'ns')
with pytest.raises(OverflowError):
Timedelta(max_td.value + 1, 'ns')
def test_timedelta_arithmetic(self):
data = pd.Series(['nat', '32 days'], dtype='timedelta64[ns]')
deltas = [timedelta(days=1), Timedelta(1, unit='D')]
for delta in deltas:
result_method = data.add(delta)
result_operator = data + delta
expected = pd.Series(['nat', '33 days'], dtype='timedelta64[ns]')
tm.assert_series_equal(result_operator, expected)
tm.assert_series_equal(result_method, expected)
result_method = data.sub(delta)
result_operator = data - delta
expected = pd.Series(['nat', '31 days'], dtype='timedelta64[ns]')
tm.assert_series_equal(result_operator, expected)
tm.assert_series_equal(result_method, expected)
# GH 9396
result_method = data.div(delta)
result_operator = data / delta
expected = pd.Series([np.nan, 32.], dtype='float64')
tm.assert_series_equal(result_operator, expected)
tm.assert_series_equal(result_method, expected)
def test_apply_to_timedelta(self):
timedelta_NaT = pd.to_timedelta('NaT')
list_of_valid_strings = ['00:00:01', '00:00:02']
a = pd.to_timedelta(list_of_valid_strings)
b = Series(list_of_valid_strings).apply(pd.to_timedelta)
# Can't compare until apply on a Series gives the correct dtype
# assert_series_equal(a, b)
list_of_strings = ['00:00:01', np.nan, pd.NaT, timedelta_NaT]
# TODO: unused?
a = pd.to_timedelta(list_of_strings) # noqa
b = Series(list_of_strings).apply(pd.to_timedelta) # noqa
# Can't compare until apply on a Series gives the correct dtype
# assert_series_equal(a, b)
def test_components(self):
rng = timedelta_range('1 days, 10:11:12', periods=2, freq='s')
rng.components
# with nat
s = Series(rng)
s[1] = np.nan
result = s.dt.components
assert not result.iloc[0].isna().all()
assert result.iloc[1].isna().all()
def test_isoformat(self):
td = Timedelta(days=6, minutes=50, seconds=3,
milliseconds=10, microseconds=10, nanoseconds=12)
expected = 'P6DT0H50M3.010010012S'
result = td.isoformat()
assert result == expected
td = Timedelta(days=4, hours=12, minutes=30, seconds=5)
result = td.isoformat()
expected = 'P4DT12H30M5S'
assert result == expected
td = Timedelta(nanoseconds=123)
result = td.isoformat()
expected = 'P0DT0H0M0.000000123S'
assert result == expected
# trim nano
td = Timedelta(microseconds=10)
result = td.isoformat()
expected = 'P0DT0H0M0.00001S'
assert result == expected
# trim micro
td = Timedelta(milliseconds=1)
result = td.isoformat()
expected = 'P0DT0H0M0.001S'
assert result == expected
# don't strip every 0
result = Timedelta(minutes=1).isoformat()
expected = 'P0DT0H1M0S'
assert result == expected
def test_ops_error_str(self):
# GH 13624
td = Timedelta('1 day')
for l, r in [(td, 'a'), ('a', td)]:
with pytest.raises(TypeError):
l + r
with pytest.raises(TypeError):
l > r
assert not l == r
assert l != r
| bsd-3-clause | -4,043,777,534,284,601,000 | 38.51809 | 79 | 0.519281 | false |
Transkribus/TranskribusDU | TranskribusDU/tasks/DU_Table/DU_ABPTableCutAnnotator.py | 1 | 41809 | # -*- coding: utf-8 -*-
"""
Find cuts of a page and annotate them based on the table separators
Copyright Naver Labs Europe 2018
JL Meunier
Developed for the EU project READ. The READ project has received funding
from the European Union's Horizon 2020 research and innovation programme
under grant agreement No 674943.
"""
import sys, os
from optparse import OptionParser
import operator
from collections import defaultdict
from lxml import etree
import numpy as np
import shapely.geometry as geom
import shapely.affinity
try: #to ease the use without proper Python installation
import TranskribusDU_version
except ImportError:
sys.path.append( os.path.dirname(os.path.dirname( os.path.abspath(sys.argv[0]) )) )
import TranskribusDU_version
from common.trace import traceln
from xml_formats.PageXml import MultiPageXml, PageXml
from util.Polygon import Polygon
from util.Shape import ShapeLoader, PolygonPartition
from tasks.DU_Table.DU_ABPTableSkewed_CutAnnotator import _isBaselineNotO, _isBaselineInTable,\
computePRF
from tasks.DU_Table.DU_ABPTableRCAnnotation import computeMaxRowSpan
from util.partitionEvaluation import evalPartitions
from util.jaccard import jaccard_distance
class CutAnnotator:
"""
Cutting the page horizontally
"""
fRATIO = 0.66
def __init__(self):
pass
def get_separator_YX_from_DOM(self, root, fMinPageCoverage):
"""
get the x and y of the GT table separators
return lists of y, for horizontal and of x for vertical separators, per page
return [(y_list, x_list), ...]
"""
ltlYlX = []
for ndPage in MultiPageXml.getChildByName(root, 'Page'):
w, h = int(ndPage.get("imageWidth")), int(ndPage.get("imageHeight"))
lYi, lXi = [], []
l = MultiPageXml.getChildByName(ndPage,'TableRegion')
if len(l) != 1:
if l:
traceln("** warning ** %d TableRegion instead of expected 1" % len(l))
else:
traceln("** warning ** no TableRegion, expected 1")
if l:
for ndTR in l:
#enumerate the table separators
for ndSep in MultiPageXml.getChildByName(ndTR,'SeparatorRegion'):
sPoints=MultiPageXml.getChildByName(ndSep,'Coords')[0].get('points')
[(x1,y1),(x2,y2)] = Polygon.parsePoints(sPoints).lXY
dx, dy = abs(x2-x1), abs(y2-y1)
if dx > dy:
#horizontal table line
if dx > (fMinPageCoverage*w):
#ym = (y1+y2)/2.0 # 2.0 to support python2
lYi.append((y1,y2))
else:
if dy > (fMinPageCoverage*h):
#xm = (x1+x2)/2.0
lXi.append((x1,x2))
ltlYlX.append( (lYi, lXi) )
return ltlYlX
def getHisto(self, lNd, w, _fMinHorizProjection, h, _fMinVertiProjection
, fRatio=1.0
, fMinHLen=None):
"""
        return two Numpy arrays reflecting the histogram of projections of objects
first array along Y axis (horizontal projection), 2nd along X axis
(vertical projection)
        when fMinHLen is given, we do not scale horizontally text shorter than fMinHLen
"""
hy = np.zeros((h,), np.float)
hx = np.zeros((w,), np.float)
for nd in lNd:
sPoints=MultiPageXml.getChildByName(nd,'Coords')[0].get('points')
try:
x1,y1,x2,y2 = Polygon.parsePoints(sPoints).fitRectangle()
if fMinHLen is None or abs(x2-x1) > fMinHLen:
_x1, _x2 = self.scale(x1, x2, fRatio)
else:
_x1, _x2 = x1, x2
_y1, _y2 = self.scale(y1, y2, fRatio)
hy[_y1:_y2+1] += float(x2 - x1) / w
hx[_x1:_x2+1] += float(y2 - y1) / h
except ZeroDivisionError:
pass
except ValueError:
pass
return hy, hx
@classmethod
def scale(cls, a, b, fRatio):
"""
a,b are integers
apply a scaling factor to the segment
make sure its length remains non-zero
return 2 integers
"""
if fRatio == 1.0: return (a,b) # the code below does it, but no need...
l = b - a # signed length
ll = int(round(l * fRatio)) # new signed length
dl2 = (l - ll) / 2.0
ll2a = int(round(dl2))
ll2b = (l - ll) - ll2a
return a + ll2a, b - ll2b
# labels...
def _getLabel(self, i,j, liGT):
"""
        i,j are the indices of the start and end of an interval of zeros
liGT is a list of pair of pixel coordinates
an interval of zeros is positive if it contains either end of the
separator or its middle.
"""
for iGT, jGT in liGT:
mGT = (iGT+jGT) // 2
if i <= iGT and iGT <= j:
return "S"
elif i <= jGT and jGT <= j:
return "S"
elif i <= mGT and mGT <= j:
return "S"
return "O"
def getCentreOfZeroAreas(self, h, liGT=None):
"""
liGT is the groundtruth indices
        return a list of the centers of the areas containing consecutive 0s
"""
lij = [] #list of area indices
i0 = None # index of start of a 0 area
imax = h.shape[0]
i = 0
while i < imax:
if i0 is None: # we were in a non-zero area
if h[i] <= 0: i0 = i # start of an area of 0s
else: # we were in a zero area
if h[i] > 0:
# end of area of 0s
lij.append((i0, i-1))
i0 = None
i += 1
if not i0 is None:
lij.append((i0, imax-1))
if liGT is None:
liLbl = [None] * len(lij)
else:
liLbl = [self._getLabel(i,j,liGT) for (i,j) in lij]
#take middle
li = [ (j + i) // 2 for (i,j) in lij ]
return li, liLbl
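    # Worked example (sketch): with h = np.array([1, 0, 0, 0, 2, 0, 1]) and liGT=None,
    # the zero runs span indices 1..3 and 5..5, so the method returns ([2, 5], [None, None]):
    # the middle index of each run, plus one label per run (None when no groundtruth is given).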
def getLowestOfZeroAreas(self, h, liGT=None):
"""
liGT is the groundtruth indices
        return a list of the lowest points of the areas containing consecutive 0s
"""
lijm = [] #list of area indices
i0 = None # index of start of a 0 area
imax = h.shape[0]
i = 0
minV, minI = None, None
while i < imax:
if i0 is None: # we were in a non-zero area
if h[i] <= 0:
i0 = i # start of an area of 0s
minV, minI = h[i0], i0
else: # we were in a zero area
if h[i] > 0:
# end of area of 0s
lijm.append((i0, i-1, minI))
i0 = None
else:
if h[i] <= minV: # take rightmost
minV, minI = h[i], i
i += 1
if not i0 is None:
minV, minI = h[i0], i0
i = i0 + 1
while i < imax:
                if h[i] < minV: # take leftmost
minV, minI = h[i], i
i += 1
lijm.append((i0, imax-1, minI))
if liGT is None:
liLbl = [None] * len(lijm)
else:
liLbl = [self._getLabel(i,j,liGT) for (i,j,_m) in lijm]
#take middle
li = [ m for (_i,_j, m) in lijm ]
return li, liLbl
def add_cut_to_DOM(self, root,
fMinHorizProjection=0.05,
fMinVertiProjection=0.05,
ltlYlX=[]
, fRatio = 1.0
, fMinHLen = None):
"""
for each page, compute the histogram of projection of text on Y then X
axis.
From this histogram, find cuts.
        fMinHorizProjection and fMinVertiProjection determine the thresholds as a percentage
        of the width (resp. height) of the page. Any bin lower than the threshold is considered as zero.
Map cuts to table separators to annotate them
Dynamically tune the threshold for cutting so as to reflect most separators
as a cut.
Tag them if ltlYlX is given
ltlYlX is a list of (ltY1Y2, ltX1X2) per page.
ltY1Y2 is the list of (Y1, Y2) of horizontal separators,
ltX1X2 is the list of (X1, X2) of vertical separators.
Modify the XML DOM by adding a separator cut, annotated if GT given
"""
domid = 0 #to add unique separator id
llX, llY = [], []
for iPage, ndPage in enumerate(MultiPageXml.getChildByName(root, 'Page')):
try:
lYi, lXi = ltlYlX[iPage]
#except TypeError:
except:
lYi, lXi = [], []
w, h = int(ndPage.get("imageWidth")), int(ndPage.get("imageHeight"))
#Histogram of projections
lndTexLine = MultiPageXml.getChildByName(ndPage, 'TextLine')
aYHisto, aXHisto = self.getHisto(lndTexLine,
w, fMinHorizProjection,
h, fMinVertiProjection
, fRatio
, fMinHLen=fMinHLen)
aYHisto = aYHisto - fMinHorizProjection
aXHisto = aXHisto - fMinVertiProjection
#find the centre of each area of 0s and its label
lY, lYLbl = self.getCentreOfZeroAreas(aYHisto, lYi)
# lX, lXLbl = self.getCentreOfZeroAreas(aXHisto, lXi)
lX, lXLbl = self.getLowestOfZeroAreas(aXHisto, lXi)
traceln(lY)
traceln(lX)
traceln(" - %d horizontal cuts" % len(lY))
traceln(" - %d vertical cuts" % len(lX))
#ndTR = MultiPageXml.getChildByName(ndPage,'TableRegion')[0]
# horizontal grid lines
for y, ylbl in zip(lY, lYLbl):
domid += 1
self.addPageXmlSeparator(ndPage, ylbl, 0, y, w, y, domid)
# Vertical grid lines
for x, xlbl in zip(lX, lXLbl):
domid += 1
self.addPageXmlSeparator(ndPage, xlbl, x, 0, x, h, domid)
llX.append(lX)
llY.append(lY)
return (llY, llX)
@classmethod
def addPageXmlSeparator(cls, nd, sLabel, x1, y1, x2, y2, domid):
ndSep = MultiPageXml.createPageXmlNode("CutSeparator")
if not sLabel is None:
# propagate the groundtruth info we have
ndSep.set("type", sLabel)
if abs(x2-x1) > abs(y2-y1):
ndSep.set("orient", "0")
else:
ndSep.set("orient", "90")
ndSep.set("id", "s_%d"%domid)
nd.append(ndSep)
ndCoord = MultiPageXml.createPageXmlNode("Coords")
MultiPageXml.setPoints(ndCoord, [(x1, y1), (x2, y2)])
ndSep.append(ndCoord)
return ndSep
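    # Rough sketch of the produced node (exact serialization is up to MultiPageXml):
    #   addPageXmlSeparator(ndPage, "S", 0, 120, 2000, 120, 7) appends roughly
    #   <CutSeparator type="S" orient="0" id="s_7"><Coords points="0,120 2000,120"/></CutSeparator>
    #   ("orient" is "0" here because the segment is wider than it is tall).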
def remove_cuts_from_dom(self, root):
"""
clean the DOM from any existing cut
return the number of removed cut lines
"""
lnd = MultiPageXml.getChildByName(root,'CutSeparator')
n = len(lnd)
for nd in lnd:
nd.getparent().remove(nd)
#check...
lnd = MultiPageXml.getChildByName(root,'CutSeparator')
assert len(lnd) == 0
return n
def loadPageCol(self, ndPage, fRatio
, shaper_fun=ShapeLoader.node_to_Point
, funIndex=lambda x: x._du_index):
"""
load the page, looking for Baseline
can filter by DU_row
return a list of shapely objects
, a dict of sorted list of objects, by column
GT BUG: some Baseline are assigned to the wrong Cell
=> we also fix this here....
"""
loBaseline = [] # list of Baseline shapes
i = 0
dsetTableByCol = defaultdict(set) # sets of object ids, by col
dsetTableDataByCol = defaultdict(set) # sets of object ids, by col
dO = {}
dNodeSeen = {}
# first associate a unique id to each baseline and list them
lshapeCell = []
lOrphanBaselineShape = []
lCells = MultiPageXml.getChildByName(ndPage, "TableCell")
maxHeaderRowSpan = computeMaxRowSpan(lCells)
traceln(" - maxHeaderRowSpan=", maxHeaderRowSpan)
for ndCell in lCells:
row, col = int(ndCell.get("row")), int(ndCell.get("col"))
rowSpan = int(ndCell.get("rowSpan"))
plg = ShapeLoader.node_to_Polygon(ndCell)
#ymin, ymax of polygon
lx = [_x for _x, _y in plg.exterior.coords]
xmin, xmax = min(lx), max(lx)
plg._row = row
plg._col = col
plg._xmin, plg._xmax = xmin, xmax
lshapeCell.append(plg)
for nd in MultiPageXml.getChildByName(ndCell, "Baseline"):
nd.set("du_index", "%d" % i)
ndParent = nd.getparent()
dNodeSeen[ndParent.get('id')] = True
# Baseline as a shapely object
try:
o = shaper_fun(nd) #make a LineString
except Exception as e:
traceln("ERROR: id=", nd.getparent().get("id"))
raise e
# scale the objects, as done when cutting!!
# useless currently since we make a Point...
o = shapely.affinity.scale(o, xfact=fRatio, yfact=fRatio)
o._du_index = i
o._du_nd = nd
o._dom_id = nd.getparent().get("id")
loBaseline.append(o)
# is this object in the correct cell???
# We must use the centroid of the text box, otherwise a baseline
# may be assigned to the next row
# NOOO x = ShapeLoader.node_to_Polygon(ndParent).centroid.x
# we must look for the leftest coordinate
# NO CHECK FOR COLUMNS
dsetTableByCol[col].add(funIndex(o))
if (row+rowSpan) > maxHeaderRowSpan:
dsetTableDataByCol[col].add(funIndex(o))
i += 1
# if lOrphanBaselineShape:
# traceln(" *** error: %d Baseline in incorrect row - fixing this..." % len(lOrphanBaselineShape))
# for o in lOrphanBaselineShape:
# bestrow, bestdeltacol = 0, 9999
# try:
# y = o.y
# except:
# y = o.centroid.y
# for plg in lshapeCell:
# if plg._ymin <= y and y <= plg._ymax:
# # sounds good
# deltacol = abs(o._bad_cell._col - plg._col)
# if deltacol == 0:
# # same column, ok it is that one
# bestrow = plg._row
# break
# else:
# if bestdeltacol > deltacol:
# bestdeltacol = deltacol
# bestrow = plg._row
# traceln("\t id=%s misplaced in row=%s instead of row=%s" %(
# o._du_nd.getparent().get("id")
# , o._bad_cell._row
# , bestrow))
# dsetTableByCol[bestrow].add(o._du_index)
# del o._bad_cell
# and (UGLY) process all Baseline outside any TableCell...
for nd in MultiPageXml.getChildByName(ndPage, "Baseline"):
try:
dNodeSeen[nd.getparent().get('id')]
except:
#OLD "GOOD" CODE HERE
nd.set("du_index", "%d" % i)
# Baseline as a shapely object
o = shaper_fun(nd) #make a LineString
# scale the objects, as done when cutting!!
o = shapely.affinity.scale(o, xfact=fRatio)
o._du_index = i
o._du_nd = nd
o._dom_id = nd.getparent().get("id")
loBaseline.append(o)
i += 1
return loBaseline, dsetTableByCol, dsetTableDataByCol, maxHeaderRowSpan
class NoSeparatorException(Exception):
pass
class BaselineCutAnnotator(CutAnnotator):
"""
Much simpler approach:
- a block is defined by its baseline.
- the baseline of each block defines a possible cut
- a parameter defines if the corresponding block is above or below the cut
- so a cut defines a partition of the page block
    We use the table annotation to determine the baseline that is at the top
    or at the bottom of each table line (or column)
"""
bSIO = False # by default, we use SO as labels
#iModulo = 1
def __init__(self, bCutIsBeforeText=True):
CutAnnotator.__init__(self)
self.bCutIsBeforeText = bCutIsBeforeText
#self._fModulo = float(self.iModulo)
@classmethod
def setLabelScheme_SIO(cls):
cls.bSIO = True
return True
# def setModulo(self, iModulo):
# self.iModulo = iModulo
# self._fModulo = float(self.iModulo)
# def moduloSnap(self, x, y):
# """
# return the same coordinate modulo the current modulo
# """
# return (int(round(x / self.fModulo)) * self.iModulo,
# int(round(y / self.fModulo)) * self.iModulo)
@classmethod
def getDomBaselineXY(cls, domNode):
"""
find the baseline descendant node and return its "central" point
"""
try:
ndBaseline = MultiPageXml.getChildByName(domNode,'Baseline')[0]
except IndexError as e:
traceln("WARNING: No Baseline child in ", domNode.get('id'))
raise e
x, y = cls.getPolylineAverageXY(ndBaseline)
# modulo should be done only after the GT assigns labels.
return (x, y)
@classmethod
def getPolylineAverageXY(cls, ndPolyline):
"""
weighted average X and average Y of a polyline
the weight indicate how long each segment at a given X, or Y, was.
"""
sPoints=ndPolyline.get('points')
lXY = Polygon.parsePoints(sPoints).lXY
# list of X and Y values and respective weights
lXYWxWy = [((x1+x2)/2.0, abs(y2-y1), # for how long at this X?
(y1+y2)/2.0, abs(x2-x1)) \
for (x1,y1), (x2, y2) in zip(lXY, lXY[1:])]
fWeightedSumX = sum(x*wx for x, wx, _, _ in lXYWxWy)
fWeightedSumY = sum(y*wy for _, _, y, wy in lXYWxWy)
fSumWeightX = sum( wx for _, wx , _, _ in lXYWxWy)
fSumWeightY = sum( wy for _, _ , _, wy in lXYWxWy)
Xavg = int(round(fWeightedSumX/fSumWeightX)) if fSumWeightX > 0 else 0
Yavg = int(round(fWeightedSumY/fSumWeightY)) if fSumWeightY > 0 else 0
# Xavg, Yavg = self.moduloSnap(Xavg, Yavg)
return (Xavg, Yavg)
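    # Worked example (sketch): for the polyline (0,0)-(10,0)-(10,10), the horizontal segment
    # contributes x=5 with weight 0 and y=0 with weight 10, the vertical segment contributes
    # x=10 with weight 10 and y=5 with weight 0, so the result is (10, 0): each coordinate is
    # averaged using the extent along the *other* axis as its weight.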
def _getLabelFromSeparator(self, ltXY, tlYlX, w, h):
"""
ltXY is the list of (X, Y) of the "central" point of each baseline
tlYlX are the coordinates of the GT separators
ltY1Y2 is the list of (Y1, Y2) of horizontal separators,
ltX1X2 is the list of (X1, X2) of vertical separators.
w, h are the page width and height
if self.bCutIsBeforeText is True, we look for the highest baseline below
or on each separator (which is possibly not horizontal)
if self.bCutIsBeforeText is False, we look for the lowest baseline above
or on each separator (which is possibly not horizontal)
#TODO
Same idea for vertical separators ( ***** NOT DONE ***** )
        return lY, lYLbl, lX, lXLbl
"""
ltY1Y2, ltX1X2 = tlYlX
#rough horizontal and vertical bounds
try:
ymin = operator.add(*min(ltY1Y2)) / 2.0 # ~~ (miny1+miny2)/2.0
ymax = operator.add(*max(ltY1Y2)) / 2.0
xmin = operator.add(*min(ltX1X2)) / 2.0
xmax = operator.add(*max(ltX1X2)) / 2.0
except ValueError:
raise NoSeparatorException("No groundtruth")
# find best baseline for each table separator
setBestY = set()
for (y1, y2) in ltY1Y2:
bestY = 999999 if self.bCutIsBeforeText else -1
bFound = False
for x, y in ltXY:
if x < xmin or xmax < x: # text outside table, ignore it
continue
#y of separator at x
ysep = int(round(y1 + float(y2-y1) * x / w))
if self.bCutIsBeforeText:
if ysep <= y and y < bestY and y < ymax:
#separator is above and baseline is above all others
bestY, bFound = y, True
else:
if ysep >= y and y > bestY and y > ymin:
bestY, bFound = y, True
if bFound:
setBestY.add(bestY)
setBestX = set()
for (x1, x2) in ltX1X2:
bestX = 999999 if self.bCutIsBeforeText else -1
bFound = False
for x, y in ltXY:
if y < ymin or ymax < y: # text outside table, ignore it
continue
#x of separator at Y
xsep = int(round(x1 + float(x2-x1) * x / h))
if self.bCutIsBeforeText:
if xsep <= x and x < bestX and x < xmax:
#separator is above and baseline is above all others
bestX, bFound = x, True
else:
if xsep >= x and x > bestX and x > xmin:
bestX, bFound = x, True
if bFound:
setBestX.add(bestX)
# zero or one cut given a position
lY = list(set(y for _, y in ltXY)) # zero or 1 cut per Y
lY.sort()
lX = list(set(x for x, _ in ltXY)) # zero or 1 cut per X
lX.sort()
if self.bSIO:
# O*, S, (S|I)*, O*
if setBestY:
lYLbl = [ ("S" if y in setBestY \
else ("I" if ymin <= y and y <= ymax else "O")) \
for y in lY]
else:
lYLbl = ["O"] * len(lY) # should never happen...
if setBestX:
lXLbl = [ ("S" if x in setBestX \
else ("I" if xmin <= x and x <= xmax else "O")) \
for x in lX]
else:
lXLbl = ["O"] * len(lX) # should never happen...
else:
# annotate the best baseline-based separator
lYLbl = [ ("S" if y in setBestY else "O") for y in lY]
lXLbl = [ ("S" if x in setBestX else "O") for x in lX]
return lY, lYLbl, lX, lXLbl
# def _getLabelFromCells(self, ltXY, lCells):
# """
#
# NOT FINISHED
#
# SOME spans are ignored, some not
#
# This is done when making the straight separator, based on their length.
#
# ltXY is the list of (X, Y) of the "central" point of each baseline
# lCells is the list of cells of the table
#
# For Y labels (horizontal cuts):
# - if self.bCutIsBeforeText is True, we look for the highest baseline of
# each table line.
# - if self.bCutIsBeforeText is False, we look for the lowest baseline of
# each table line.
#
# same idea for X labels (vertical cuts)
#
# returns the list of Y labels, the list of X labels
# """
#
# lYLbl, lXLbl = [], []
#
# traceln("DIRTY: ignore rowspan above 5")
# lCells = list(filter(lambda x: int(x.get('rowSpan')) < 5, lCells))
# dBestByRow = collections.defaultdict(lambda _: None) # row->best_Y
# dBestByCol = collections.defaultdict(lambda _: None) # col->best_X
#
# dRowSep_lSgmt = collections.defaultdict(list)
# dColSep_lSgmt = collections.defaultdict(list)
# for cell in lCells:
# row, col, rowSpan, colSpan = [int(cell.get(sProp)) for sProp \
# in ["row", "col", "rowSpan", "colSpan"] ]
# coord = cell.xpath("./a:%s" % ("Coords"),namespaces={"a":MultiPageXml.NS_PAGE_XML})[0]
# sPoints = coord.get('points')
# plgn = Polygon.parsePoints(sPoints)
# lT, lR, lB, lL = plgn.partitionSegmentTopRightBottomLeft()
#
# #now the top segments contribute to row separator of index: row
# dRowSep_lSgmt[row].extend(lT)
# #now the bottom segments contribute to row separator of index: row+rowSpan
# dRowSep_lSgmt[row+rowSpan].extend(lB)
#
# dColSep_lSgmt[col].extend(lL)
# dColSep_lSgmt[col+colSpan].extend(lR)
def add_cut_to_DOM(self, root, ltlYlX=[]):
"""
for each page:
- sort the block by their baseline average y
- the sorted list of Ys defines the cuts.
Tag them if ltlYlX is given
ltlYlX is a list of (ltY1Y2, ltX1X2) per page.
ltY1Y2 is the list of (Y1, Y2) of horizontal separators,
ltX1X2 is the list of (X1, X2) of vertical separators.
Modify the XML DOM by adding a separator cut, annotated if GT given
"""
domid = 0 #to add unique separator id
ltlYCutXCut = []
for iPage, ndPage in enumerate(MultiPageXml.getChildByName(root, 'Page')):
w, h = int(ndPage.get("imageWidth")), int(ndPage.get("imageHeight"))
# list of Ys of baselines, and indexing of block by Y
#list of (X,Y)
ltXY = []
lndTexLine = MultiPageXml.getChildByName(ndPage, 'TextLine')
for ndBlock in lndTexLine:
try:
ltXY.append(self.getDomBaselineXY(ndBlock))
except:
pass
# Groundtruth if any
#lCells= MultiPageXml.getChildByName(ndPage, 'TableCell')
# let's collect the segment forming the separators
try:
lY, lYLbl, lX, lXLbl = self._getLabelFromSeparator(ltXY,
ltlYlX[iPage], w, h)
except NoSeparatorException:
lX = list(set(x for x, _ in ltXY)) # zero or 1 cut per X
lY = list(set(y for _, y in ltXY)) # zero or 1 cut per Y
lX.sort() # to have a nice XML
lY.sort()
lXLbl = [None] * len(lX)
lYLbl = [None] * len(lY)
ndTR = MultiPageXml.getChildByName(root,'TableRegion')[0]
            # horizontal grid lines
            for y, ylbl in zip(lY, lYLbl):
                domid += 1
                self.addPageXmlSeparator(ndTR, ylbl, 0, y, w, y, domid)
            traceln(" - added %d horizontal cuts" % len(lY))
            # vertical grid lines
            for x, xlbl in zip(lX, lXLbl):
                domid += 1
                self.addPageXmlSeparator(ndTR, xlbl, x, 0, x, h, domid)
            traceln(" - added %d vertical cuts" % len(lX))
ltlYCutXCut.append( ([y for _,y in ltXY],
[x for x,_ in ltXY]))
return ltlYCutXCut
# ------------------------------------------------------------------
def main(sFilename, sOutFilename, fMinHorizProjection=0.05, fMinVertiProjection=0.05
, bBaselineFirst=False
, bBaselineLast=False
, bSIO=False):
print("- cutting: %s --> %s"%(sFilename, sOutFilename))
    # Some grid lines will be O or I simply because they are too short.
fMinPageCoverage = 0.5 # minimum proportion of the page crossed by a grid line
# we want to ignore col- and row- spans
#for the pretty printer to format better...
parser = etree.XMLParser(remove_blank_text=True)
doc = etree.parse(sFilename, parser)
root=doc.getroot()
if bBaselineFirst:
doer = BaselineCutAnnotator(bCutIsBeforeText=True)
if bSIO: doer.setLabelScheme_SIO()
elif bBaselineLast:
doer = BaselineCutAnnotator(bCutIsBeforeText=False)
if bSIO: doer.setLabelScheme_SIO()
else:
doer = CutAnnotator()
print("doer=%s"%doer)
#map the groundtruth table separators to our grid, per page (1 in tABP)
ltlYlX = doer.get_separator_YX_from_DOM(root, fMinPageCoverage)
# Find cuts and map them to GT
#
if bBaselineFirst or bBaselineLast:
doer.add_cut_to_DOM(root, ltlYlX=ltlYlX)
else:
doer.add_cut_to_DOM(root, ltlYlX=ltlYlX,
fMinHorizProjection=fMinHorizProjection,
fMinVertiProjection=fMinVertiProjection,)
#l_DU_row_Y, l_DU_row_GT = doer.predict(root)
doc.write(sOutFilename, encoding='utf-8',pretty_print=True,xml_declaration=True)
print('Annotated cut separators added into %s'%sOutFilename)
global_maxHeaderRowSpan = None
def _isBaselineInTableData(nd):
"""
a Baseline in a TableRegion belongs to a TableCell element
"""
global global_maxHeaderRowSpan
v = nd.getparent().getparent().get("row")
if v is None:
return False
else:
return int(v) >= global_maxHeaderRowSpan
def get_col_partition(doer, sxpCut, dNS
, sFilename, lFilterFun
, fRatio
, bVerbose=False
, funIndex=lambda x: x._du_index
):
"""
return the GT partition in columns, as well as 1 partition per filter function
"""
global global_maxHeaderRowSpan
if bVerbose: traceln("- loading %s"%sFilename)
parser = etree.XMLParser()
doc = etree.parse(sFilename, parser)
root=doc.getroot()
llsetRun = []
pnum = 0
lndPage = MultiPageXml.getChildByName(root, 'Page')
    assert len(lndPage) == 1, "NOT SUPPORTED: file has many pages - sorry"
for ndPage in lndPage:
pnum += 1
if bVerbose: traceln(" - page %s - loading table GT" % pnum)
loBaseline, dsetTableByCol, dsetTableDataByCol, global_maxHeaderRowSpan = doer.loadPageCol(ndPage, fRatio
, funIndex=funIndex)
if bVerbose: traceln(" - found %d objects on page" % (len(loBaseline)))
# make a dictionary of cumulative sets, and the set of all objects
lTableColK = sorted(dsetTableByCol.keys())
lTableDataColK = sorted(dsetTableDataByCol.keys())
if bVerbose:
traceln(" - found %d cols" % (len(lTableColK)))
traceln(" - found %d objects in the table" % (sum(len(v) for v in dsetTableByCol.values())))
traceln(" - found %d objects in the table data" % (sum(len(v) for v in dsetTableDataByCol.values())))
lNdCut = ndPage.xpath(sxpCut, namespaces=dNS)
if bVerbose:
traceln(" - found %d cuts" % (len(lNdCut)))
else:
traceln("- loaded %40s " % sFilename
, " %6d cols %6d 'S' cuts" % ( len(lTableColK)
, len(lNdCut))
, " %6d objects %6d table objects" % (
len(loBaseline)
, sum(len(v) for v in dsetTableByCol.values())
)
)
loCut = []
for ndCut in lNdCut:
#now we need to infer the bounding box of that object
(x1, y1), (x2, y2) = PageXml.getPointList(ndCut) #the polygon
# Create the shapely shape
loCut.append(geom.LineString([(x1, y1), (x2, y2)]))
w,h = float(ndPage.get("imageWidth")), float(ndPage.get("imageHeight"))
# # Add a fictive cut at top of page
# loCut.append(geom.LineString([(0, 0), (w, 0)]))
# # Add a fictive cut at end of page
# loCut.append(geom.LineString([(0, h), (w, h)]))
# order it by line centroid x
loCut.sort(key=lambda o: o.centroid.x)
# dcumset is the GT!!
lsetGT = [dsetTableByCol[k] for k in lTableColK] # list of set of du_index
lsetDataGT = [dsetTableDataByCol[k] for k in lTableDataColK]
# NOW, look at predictions
for filterFun in lFilterFun:
loBaselineInTable = [o for o in loBaseline if filterFun(o._du_nd)]
if bVerbose: traceln(" - %d objects on page predicted in table (%d out)" % (
len(loBaselineInTable)
, len(loBaseline) - len(loBaselineInTable)))
# Now create the list of partitions created by the Cuts
lsetRun = []
partition = PolygonPartition(loBaselineInTable)
if True: # or bCutOnLeft:
#cut if above the text that led to its creation
setAllPrevIds = set([]) # cumulative set of what was already taken
for oCut in loCut:
lo = partition.getObjectOnRightOfLine(oCut)
setIds = set(funIndex(o) for o in lo)
#print(oCut.centroid.x, setIds)
if setAllPrevIds:
prevColIds = setAllPrevIds.difference(setIds) # content of previous row
if prevColIds:
#an empty set is denoting alternative cuts leading to same partition
lsetRun.append(prevColIds)
setAllPrevIds = setIds
else:
assert False, "look at this code..."
# #cut if below the text that led to its creation
# cumSetIds = set([]) # cumulative set
# for oCut in loCut:
# lo = partition.getObjectAboveLine(oCut)
# setIds = set(o._du_index for o in lo)
# rowIds = setIds.difference(cumSetIds) # only last row!
# if rowIds:
# #an empty set is denoting alternative cuts leading to same partition
# lsetRun.append(rowIds)
# cumSetIds = setIds
# _debugPartition("run", lsetRun)
# _debugPartition("ref", lsetGT)
llsetRun.append(lsetRun)
return lsetGT, lsetDataGT, llsetRun
def op_eval_col(lsFilename, fSimil, fRatio, bVerbose=False):
"""
We load the XML
- get the CutSeparator elements
- get the text objects (geometry=Baseline)
-
"""
global global_maxHeaderRowSpan
nOk, nErr, nMiss = 0, 0, 0
if fSimil is None:
#lfSimil = [ i / 100 for i in range(75, 101, 5)]
lfSimil = [ i / 100 for i in range(70, 101, 10)]
else:
lfSimil = [fSimil]
# we use only BIO + separators
dOkErrMissOnlyCol = { fSimil:(0,0,0) for fSimil in lfSimil }
dOkErrMissOnlyCol.update({'name':'OnlyCol'
, 'FilterFun':_isBaselineNotO})
# we use the TableRegion + separators
dOkErrMissTableCol = { fSimil:(0,0,0) for fSimil in lfSimil }
dOkErrMissTableCol.update({'name':'TableCol'
, 'FilterFun':_isBaselineInTable})
# we use the TableRegion excluding the header + separators
dOkErrMissTableDataCol = { fSimil:(0,0,0) for fSimil in lfSimil }
dOkErrMissTableDataCol.update({'name':'TableDataCol'
, 'FilterFun':_isBaselineInTableData})
ldOkErrMiss = [dOkErrMissOnlyCol, dOkErrMissTableCol, dOkErrMissTableDataCol]
lFilterFun = [d['FilterFun'] for d in ldOkErrMiss]
# sxpCut = './/pc:CutSeparator[@orient="0" and @DU_type="S"]' #how to find the cuts
sxpCut = './/pc:CutSeparator[@orient="90"]' #how to find the cuts
dNS = {"pc":PageXml.NS_PAGE_XML}
doer = CutAnnotator()
traceln(" - Cut selector = ", sxpCut)
# load objects: Baseline and Cuts
for n, sFilename in enumerate(lsFilename):
lsetGT, lsetDataGT, llsetRun = get_col_partition(doer, sxpCut, dNS
, sFilename, lFilterFun
, fRatio
, bVerbose=False
, funIndex=lambda x: x._du_index # simpler to view
# , funIndex=lambda x: x._dom_id # more precise
)
pnum = 1 # only support single-page file...
for dOkErrMiss, lsetRun in zip(ldOkErrMiss, llsetRun):
if dOkErrMiss['name'] == "TableDataCol":
# we need to filter also the GT to discard the header from the column
_lsetGT = lsetDataGT
else:
_lsetGT = lsetGT
if bVerbose:
traceln("----- RUN ----- ")
for s in lsetRun: traceln("run ", sorted(s))
traceln("----- REF ----- ")
for s in _lsetGT: traceln("ref ", sorted(s))
for fSimil in lfSimil:
nOk, nErr, nMiss = dOkErrMiss[fSimil]
_nOk, _nErr, _nMiss, _lFound, _lErr, _lMissed = evalPartitions(lsetRun, _lsetGT, fSimil, jaccard_distance)
nOk += _nOk
nErr += _nErr
nMiss += _nMiss
if bVerbose or fSimil == 1.0:
_fP, _fR, _fF = computePRF(_nOk, _nErr, _nMiss)
traceln("%4d %8s simil:%.2f P %5.1f R %5.1f F1 %5.1f ok=%6d err=%6d miss=%6d %s page=%d" %(
n+1, dOkErrMiss['name'], fSimil
, _fP, _fR, _fF
, _nOk, _nErr, _nMiss
, os.path.basename(sFilename), pnum))
dOkErrMiss[fSimil] = (nOk, nErr, nMiss)
for dOkErrMiss in [dOkErrMissOnlyCol, dOkErrMissTableCol, dOkErrMissTableDataCol]:
traceln()
name = dOkErrMiss['name']
for fSimil in lfSimil:
nOk, nErr, nMiss = dOkErrMiss[fSimil]
fP, fR, fF = computePRF(nOk, nErr, nMiss)
traceln("ALL %8s simil:%.2f P %5.1f R %5.1f F1 %5.1f " % (name, fSimil, fP, fR, fF )
, " "
,"ok=%d err=%d miss=%d" %(nOk, nErr, nMiss))
return (nOk, nErr, nMiss)
def test_scale():
assert (1,3) == CutAnnotator.scale(1, 3, 1.0)
assert (3,1) == CutAnnotator.scale(3, 1, 1.0)
def symcheck(a, b, r, aa, bb):
assert (aa, bb) == CutAnnotator.scale(a, b, r), (a, b, r, aa, bb)
assert (bb, aa) == CutAnnotator.scale(b, a, r), (b, a, r, bb, aa)
symcheck(1, 2, 1.0, 1, 2)
symcheck(1, 1, 1.0, 1, 1)
symcheck(1, 10, 1.0, 1, 10)
assert (2,7) == CutAnnotator.scale(0 , 10, 0.5)
assert (8,3) == CutAnnotator.scale(10, 0 , 0.5)
assert (-2,-7) == CutAnnotator.scale(-0 , -10, 0.5)
assert (-8,-3) == CutAnnotator.scale(-10, -0 , 0.5)
assert (1,1) == CutAnnotator.scale(1, 1, 0.33)
# ------------------------------------------------------------------
if __name__ == "__main__":
usage = ""
parser = OptionParser(usage=usage, version="0.1")
parser.add_option("--baseline_first", dest='bBaselineFirst', action="store_true", help="Cut based on first baeline of row or column")
parser.add_option("--SIO" , dest='bSIO' , action="store_true", help="SIO labels")
# ---
#parse the command line
(options, args) = parser.parse_args()
#load mpxml
sFilename = args[0]
try:
sOutFilename = args[1]
except:
sp, sf = os.path.split(sFilename)
sOutFilename = os.path.join(sp, "cut-" + sf)
try:
fMinH = float(args[2])
except:
fMinH = None
if fMinH is None:
main(sFilename, sOutFilename, bBaselineFirst=options.bBaselineFirst, bSIO=options.bSIO)
else:
fMinV = float(args[4]) # specify none or both
main(sFilename, sOutFilename, fMinH, fMinV, bBaselineFirst=options.bBaselineFirst, bSIO=options.bSIO)
| bsd-3-clause | -3,160,245,046,557,847,000 | 37.892093 | 139 | 0.509388 | false |
bgruening/EDeN | eden/modifier/graph/vertex_attributes.py | 1 | 6758 | import numpy as np
def incident_edge_label(graph_list=None, output_attribute='type', separator='', level=1):
"""
level: int
level=1 considers all incident edges
level=2 considers all edges incident on the neighbors
"""
for g in graph_list:
# iterate over nodes
for n, d in g.nodes_iter(data=True):
# for all neighbors
edge_labels = []
if level == 1:
edge_labels += [ed.get('label', 'N/A') for u, v, ed in g.edges_iter(n, data=True)]
elif level == 2:
neighbors = g.neighbors(n)
for nn in neighbors:
# extract list of edge labels
edge_labels += [ed.get('label', 'N/A') for u, v, ed in g.edges_iter(nn, data=True)]
else:
raise Exception('Unknown level: %s' % level)
# consider the sorted serialization of all labels as a type
vertex_type = separator.join(sorted(edge_labels))
g.node[n][output_attribute] = vertex_type
yield g
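# Hedged usage sketch (my_graph is a placeholder; assumes the networkx 1.x API used above):
#
#   for g in incident_edge_label([my_graph], output_attribute='type', separator='|'):
#       pass
#
# Each node of g then carries a 'type' attribute such as 'backbone|basepair', i.e. the
# sorted, '|'-joined labels of its incident edges.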
def incident_node_label(graph_list=None, output_attribute='type', separator='', level=1):
"""
level: int
level=1 considers all incident nodes
level=2 considers all nodes incident on the neighbors
"""
for g in graph_list:
# iterate over nodes
for n, d in g.nodes_iter(data=True):
# for all neighbors
node_labels = []
if level == 1:
node_labels += [g.node[u].get('label', 'N/A') for u in g.neighbors(n)]
elif level == 2:
neighbors = g.neighbors(n)
for nn in neighbors:
# extract list of labels
node_labels += [g.node[u].get('label', 'N/A') for u in g.neighbors(nn)]
else:
raise Exception('Unknown level: %s' % level)
# consider the sorted serialization of all labels as a type
vertex_type = separator.join(sorted(node_labels))
g.node[n][output_attribute] = vertex_type
yield g
def translate(graph_list=None, input_attribute='label', output_attribute='label', label_map=dict(), default=' '):
original_attribute = input_attribute + '_original'
for g in graph_list:
# iterate over nodes
for n, d in g.nodes_iter(data=True):
key = d.get(input_attribute, default)
g.node[n][original_attribute] = key
mapped_attribute = label_map.get(key, default)
g.node[n][output_attribute] = mapped_attribute
yield g
def colorize(graph_list=None, output_attribute='level', labels=['A', 'U', 'C', 'G']):
values = np.linspace(0.0, 1.0, num=len(labels))
color_dict = dict(zip(labels, values))
for g in graph_list:
# iterate over nodes
for n, d in g.nodes_iter(data=True):
g.node[n][output_attribute] = color_dict.get(d['label'], 0)
yield g
def colorize_binary(graph_list=None, output_attribute='color_value', input_attribute='weight', level=0):
for g in graph_list:
# iterate over nodes
for n, d in g.nodes_iter(data=True):
val = d.get(input_attribute, 0)
if val <= level:
color_value = 0
else:
color_value = 1
g.node[n][output_attribute] = color_value
yield g
def trapezoidal_reweighting(graph_list=None, high_weight=1.0, low_weight=0.1, high_weight_window_start=0, high_weight_window_end=1, low_weight_window_start=0, low_weight_window_end=1):
"""
    Piecewise linear weight function between two levels with specified start/end positions.
high ___
low __/ \__
"""
    # assert high_weight > low_weight
if high_weight < low_weight:
raise Exception('high_weight (%f) must be higher than low_weight (%f)' % (high_weight, low_weight))
# assert low_weight boundaries includes high_weight boundaries
if high_weight_window_start > low_weight_window_end:
raise Exception('high_weight_window_start (%d) must be lower than low_weight_window_end (%d)' %
(high_weight_window_start, low_weight_window_end))
if high_weight_window_start < low_weight_window_start:
raise Exception('high_weight_window_start (%d) must be higher than low_weight_window_start (%d)' %
(high_weight_window_start, low_weight_window_start))
if high_weight_window_end < low_weight_window_start:
raise Exception('high_weight_window_end (%d) must be higher than low_weight_window_start (%d)' %
(high_weight_window_end, low_weight_window_start))
if high_weight_window_end > low_weight_window_end:
        raise Exception('high_weight_window_end (%d) must be lower than low_weight_window_end (%d)' %
(high_weight_window_end, low_weight_window_end))
for g in graph_list:
# iterate over nodes
for n, d in g.nodes_iter(data=True):
if 'position' not in d:
# assert nodes must have position attribute
raise Exception('Nodes must have "position" attribute')
# given the 'position' attribute of node assign weight according to piece wise linear weight function between two levels
pos = d['position']
if pos < low_weight_window_start:
"""
___
__/ \__
|
"""
g.node[n]["weight"] = low_weight
elif pos >= low_weight_window_start and pos < high_weight_window_start:
"""
___
__/ \__
|
"""
g.node[n]["weight"] = (high_weight - low_weight) / (high_weight_window_start - low_weight_window_start) * \
(pos - low_weight_window_start) + low_weight
elif pos >= high_weight_window_start and pos < high_weight_window_end:
"""
___
__/ \__
|
"""
g.node[n]["weight"] = high_weight
elif pos >= high_weight_window_end and pos < low_weight_window_end:
"""
___
__/ \__
|
"""
g.node[n]["weight"] = high_weight - \
(high_weight - low_weight) / (low_weight_window_end - high_weight_window_end) * (pos - high_weight_window_end)
else:
"""
___
__/ \__
|
"""
g.node[n]["weight"] = low_weight
yield g
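# Illustrative usage sketch (not part of the original module): applies the
# piecewise weighting above to a small networkx path graph whose nodes carry
# a 'position' attribute. The graph and window values are hypothetical and
# networkx 1.x (nodes_iter-style API) is assumed.
def _example_trapezoidal_reweighting():
    import networkx as nx
    g = nx.path_graph(10)
    for i in g.nodes():
        g.node[i]['position'] = i
        g.node[i]['label'] = 'A'
    reweighted = list(trapezoidal_reweighting([g],
                                              high_weight=1.0,
                                              low_weight=0.1,
                                              high_weight_window_start=3,
                                              high_weight_window_end=6,
                                              low_weight_window_start=1,
                                              low_weight_window_end=8))
    return [reweighted[0].node[i]['weight'] for i in reweighted[0].nodes()]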
| gpl-3.0 | 317,744,340,928,956,700 | 41.2375 | 184 | 0.534922 | false |
betrisey/home-assistant | homeassistant/components/sensor/arduino.py | 25 | 2027 | """
Support for getting information from Arduino pins.
Only analog pins are supported.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.arduino/
"""
import logging
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
import homeassistant.components.arduino as arduino
from homeassistant.const import CONF_NAME
from homeassistant.helpers.entity import Entity
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
CONF_PINS = 'pins'
CONF_TYPE = 'analog'
DEPENDENCIES = ['arduino']
PIN_SCHEMA = vol.Schema({
vol.Required(CONF_NAME): cv.string,
})
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_PINS):
vol.Schema({cv.positive_int: PIN_SCHEMA}),
})
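# Example configuration.yaml entry matching the schema above (illustrative
# sketch; the pin numbers and friendly names are hypothetical):
#
#   sensor:
#     - platform: arduino
#       pins:
#         0:
#           name: Analog input 0
#         1:
#           name: Analog input 1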
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the Arduino platform."""
# Verify that the Arduino board is present
if arduino.BOARD is None:
_LOGGER.error("A connection has not been made to the Arduino board")
return False
pins = config.get(CONF_PINS)
sensors = []
for pinnum, pin in pins.items():
sensors.append(ArduinoSensor(pin.get(CONF_NAME), pinnum, CONF_TYPE))
add_devices(sensors)
class ArduinoSensor(Entity):
"""Representation of an Arduino Sensor."""
def __init__(self, name, pin, pin_type):
"""Initialize the sensor."""
self._pin = pin
self._name = name
self.pin_type = pin_type
self.direction = 'in'
self._value = None
arduino.BOARD.set_mode(self._pin, self.direction, self.pin_type)
@property
def state(self):
"""Return the state of the sensor."""
return self._value
@property
def name(self):
"""Get the name of the sensor."""
return self._name
def update(self):
"""Get the latest value from the pin."""
self._value = arduino.BOARD.get_analog_inputs()[self._pin][1]
| mit | -4,180,161,488,686,603,300 | 25.324675 | 76 | 0.671436 | false |
Komodo/dbexplorer_pgsql | platform/Linux_x86_64-gcc3/pylib27/psycopg2/extras.py | 5 | 23380 | """Miscellaneous goodies for psycopg2
This module is a generic place used to hold little helper functions
and classes until a better place in the distribution is found.
"""
# psycopg/extras.py - miscellaneous extra goodies for psycopg
#
# Copyright (C) 2003-2010 Federico Di Gregorio <[email protected]>
#
# psycopg2 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# In addition, as a special exception, the copyright holders give
# permission to link this program with the OpenSSL library (or with
# modified versions of OpenSSL that use the same license as OpenSSL),
# and distribute linked combinations including the two.
#
# You must obey the GNU Lesser General Public License in all respects for
# all of the code used other than OpenSSL.
#
# psycopg2 is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
import os
import time
import codecs
import warnings
import re as regex
try:
import logging
except:
logging = None
import psycopg2
from psycopg2 import extensions as _ext
from psycopg2.extensions import cursor as _cursor
from psycopg2.extensions import connection as _connection
from psycopg2.extensions import adapt as _A
class DictCursorBase(_cursor):
"""Base class for all dict-like cursors."""
def __init__(self, *args, **kwargs):
if kwargs.has_key('row_factory'):
row_factory = kwargs['row_factory']
del kwargs['row_factory']
else:
raise NotImplementedError(
"DictCursorBase can't be instantiated without a row factory.")
_cursor.__init__(self, *args, **kwargs)
self._query_executed = 0
self._prefetch = 0
self.row_factory = row_factory
def fetchone(self):
if self._prefetch:
res = _cursor.fetchone(self)
if self._query_executed:
self._build_index()
if not self._prefetch:
res = _cursor.fetchone(self)
return res
def fetchmany(self, size=None):
if self._prefetch:
res = _cursor.fetchmany(self, size)
if self._query_executed:
self._build_index()
if not self._prefetch:
res = _cursor.fetchmany(self, size)
return res
def fetchall(self):
if self._prefetch:
res = _cursor.fetchall(self)
if self._query_executed:
self._build_index()
if not self._prefetch:
res = _cursor.fetchall(self)
return res
def next(self):
if self._prefetch:
res = _cursor.fetchone(self)
if res is None:
raise StopIteration()
if self._query_executed:
self._build_index()
if not self._prefetch:
res = _cursor.fetchone(self)
if res is None:
raise StopIteration()
return res
class DictConnection(_connection):
"""A connection that uses `DictCursor` automatically."""
def cursor(self, name=None):
if name is None:
return _connection.cursor(self, cursor_factory=DictCursor)
else:
return _connection.cursor(self, name, cursor_factory=DictCursor)
class DictCursor(DictCursorBase):
"""A cursor that keeps a list of column name -> index mappings."""
def __init__(self, *args, **kwargs):
kwargs['row_factory'] = DictRow
DictCursorBase.__init__(self, *args, **kwargs)
self._prefetch = 1
def execute(self, query, vars=None):
self.index = {}
self._query_executed = 1
return _cursor.execute(self, query, vars)
def callproc(self, procname, vars=None):
self.index = {}
self._query_executed = 1
return _cursor.callproc(self, procname, vars)
def _build_index(self):
if self._query_executed == 1 and self.description:
for i in range(len(self.description)):
self.index[self.description[i][0]] = i
self._query_executed = 0
class DictRow(list):
"""A row object that allow by-colmun-name access to data."""
__slots__ = ('_index',)
def __init__(self, cursor):
self._index = cursor.index
self[:] = [None] * len(cursor.description)
def __getitem__(self, x):
if type(x) != int:
x = self._index[x]
return list.__getitem__(self, x)
def __setitem__(self, x, v):
if type(x) != int:
x = self._index[x]
list.__setitem__(self, x, v)
def items(self):
res = []
for n, v in self._index.items():
res.append((n, list.__getitem__(self, v)))
return res
def keys(self):
return self._index.keys()
def values(self):
return tuple(self[:])
def has_key(self, x):
return self._index.has_key(x)
def get(self, x, default=None):
try:
return self[x]
except:
return default
def iteritems(self):
for n, v in self._index.items():
yield n, list.__getitem__(self, v)
def iterkeys(self):
return self._index.iterkeys()
def itervalues(self):
return list.__iter__(self)
def copy(self):
return dict(self.items())
def __contains__(self, x):
return self._index.__contains__(x)
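# Illustrative usage sketch (not part of the original module): fetch a row
# through DictCursor and read a column either by index or by name. The DSN
# is hypothetical.
def _example_dict_cursor(dsn="dbname=test user=postgres"):
    conn = psycopg2.connect(dsn)
    try:
        curs = conn.cursor(cursor_factory=DictCursor)
        curs.execute("SELECT 1 AS id, 'abc' AS data")
        row = curs.fetchone()
        # a DictRow supports both positional and by-name access
        return row[0], row['data']
    finally:
        conn.close()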
class RealDictConnection(_connection):
"""A connection that uses `RealDictCursor` automatically."""
def cursor(self, name=None):
if name is None:
return _connection.cursor(self, cursor_factory=RealDictCursor)
else:
return _connection.cursor(self, name, cursor_factory=RealDictCursor)
class RealDictCursor(DictCursorBase):
"""A cursor that uses a real dict as the base type for rows.
Note that this cursor is extremely specialized and does not allow
the normal access (using integer indices) to fetched data. If you need
to access database rows both as a dictionary and a list, then use
the generic `DictCursor` instead of `!RealDictCursor`.
"""
def __init__(self, *args, **kwargs):
kwargs['row_factory'] = RealDictRow
DictCursorBase.__init__(self, *args, **kwargs)
self._prefetch = 0
def execute(self, query, vars=None):
self.column_mapping = []
self._query_executed = 1
return _cursor.execute(self, query, vars)
def callproc(self, procname, vars=None):
self.column_mapping = []
self._query_executed = 1
return _cursor.callproc(self, procname, vars)
def _build_index(self):
if self._query_executed == 1 and self.description:
for i in range(len(self.description)):
self.column_mapping.append(self.description[i][0])
self._query_executed = 0
class RealDictRow(dict):
"""A ``dict`` subclass representing a data record."""
__slots__ = ('_column_mapping')
def __init__(self, cursor):
dict.__init__(self)
self._column_mapping = cursor.column_mapping
def __setitem__(self, name, value):
if type(name) == int:
name = self._column_mapping[name]
return dict.__setitem__(self, name, value)
class NamedTupleConnection(_connection):
"""A connection that uses `NamedTupleCursor` automatically."""
def cursor(self, *args, **kwargs):
kwargs['cursor_factory'] = NamedTupleCursor
return _connection.cursor(self, *args, **kwargs)
class NamedTupleCursor(_cursor):
"""A cursor that generates results as |namedtuple|__.
`!fetch*()` methods will return named tuples instead of regular tuples, so
their elements can be accessed both as regular numeric items as well as
attributes.
>>> nt_cur = conn.cursor(cursor_factory=psycopg2.extras.NamedTupleCursor)
>>> rec = nt_cur.fetchone()
>>> rec
Record(id=1, num=100, data="abc'def")
>>> rec[1]
100
>>> rec.data
"abc'def"
.. |namedtuple| replace:: `!namedtuple`
.. __: http://docs.python.org/release/2.6/library/collections.html#collections.namedtuple
"""
Record = None
def execute(self, query, vars=None):
self.Record = None
return _cursor.execute(self, query, vars)
def executemany(self, query, vars):
self.Record = None
        return _cursor.executemany(self, query, vars)
def callproc(self, procname, vars=None):
self.Record = None
return _cursor.callproc(self, procname, vars)
def fetchone(self):
t = _cursor.fetchone(self)
if t is not None:
nt = self.Record
if nt is None:
nt = self.Record = self._make_nt()
return nt(*t)
def fetchmany(self, size=None):
nt = self.Record
if nt is None:
nt = self.Record = self._make_nt()
ts = _cursor.fetchmany(self, size)
return [nt(*t) for t in ts]
def fetchall(self):
nt = self.Record
if nt is None:
nt = self.Record = self._make_nt()
ts = _cursor.fetchall(self)
return [nt(*t) for t in ts]
def __iter__(self):
return iter(self.fetchall())
try:
from collections import namedtuple
except ImportError, _exc:
def _make_nt(self):
raise self._exc
else:
def _make_nt(self, namedtuple=namedtuple):
return namedtuple("Record", [d[0] for d in self.description or ()])
class LoggingConnection(_connection):
"""A connection that logs all queries to a file or logger__ object.
.. __: http://docs.python.org/library/logging.html
"""
def initialize(self, logobj):
"""Initialize the connection to log to ``logobj``.
The ``logobj`` parameter can be an open file object or a Logger
instance from the standard logging module.
"""
self._logobj = logobj
if logging and isinstance(logobj, logging.Logger):
self.log = self._logtologger
else:
self.log = self._logtofile
def filter(self, msg, curs):
"""Filter the query before logging it.
This is the method to overwrite to filter unwanted queries out of the
log or to add some extra data to the output. The default implementation
just does nothing.
"""
return msg
def _logtofile(self, msg, curs):
msg = self.filter(msg, curs)
if msg: self._logobj.write(msg + os.linesep)
def _logtologger(self, msg, curs):
msg = self.filter(msg, curs)
if msg: self._logobj.debug(msg)
def _check(self):
if not hasattr(self, '_logobj'):
raise self.ProgrammingError(
"LoggingConnection object has not been initialize()d")
def cursor(self, name=None):
self._check()
if name is None:
return _connection.cursor(self, cursor_factory=LoggingCursor)
else:
return _connection.cursor(self, name, cursor_factory=LoggingCursor)
class LoggingCursor(_cursor):
"""A cursor that logs queries using its connection logging facilities."""
def execute(self, query, vars=None):
try:
return _cursor.execute(self, query, vars)
finally:
self.connection.log(self.query, self)
def callproc(self, procname, vars=None):
try:
return _cursor.callproc(self, procname, vars)
finally:
self.connection.log(self.query, self)
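# Illustrative usage sketch (not part of the original module): a
# LoggingConnection must be initialize()d with a file-like object or a
# logging.Logger before cursors are created. The DSN is hypothetical.
def _example_logging_connection(dsn="dbname=test user=postgres"):
    import sys
    conn = psycopg2.connect(dsn, connection_factory=LoggingConnection)
    conn.initialize(sys.stderr)   # executed queries are echoed to stderr
    curs = conn.cursor()          # a LoggingCursor
    curs.execute("SELECT 1")
    return curs.fetchone()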
class MinTimeLoggingConnection(LoggingConnection):
"""A connection that logs queries based on execution time.
This is just an example of how to sub-class `LoggingConnection` to
provide some extra filtering for the logged queries. Both the
    `initialize()` and `filter()` methods are overridden to make sure
that only queries executing for more than ``mintime`` ms are logged.
Note that this connection uses the specialized cursor
`MinTimeLoggingCursor`.
"""
def initialize(self, logobj, mintime=0):
LoggingConnection.initialize(self, logobj)
self._mintime = mintime
def filter(self, msg, curs):
t = (time.time() - curs.timestamp) * 1000
if t > self._mintime:
return msg + os.linesep + " (execution time: %d ms)" % t
def cursor(self, name=None):
self._check()
if name is None:
return _connection.cursor(self, cursor_factory=MinTimeLoggingCursor)
else:
return _connection.cursor(self, name, cursor_factory=MinTimeLoggingCursor)
class MinTimeLoggingCursor(LoggingCursor):
"""The cursor sub-class companion to `MinTimeLoggingConnection`."""
def execute(self, query, vars=None):
self.timestamp = time.time()
return LoggingCursor.execute(self, query, vars)
def callproc(self, procname, vars=None):
self.timestamp = time.time()
        return LoggingCursor.callproc(self, procname, vars)
# a dbtype and adapter for Python UUID type
try:
import uuid
class UUID_adapter(object):
"""Adapt Python's uuid.UUID__ type to PostgreSQL's uuid__.
.. __: http://docs.python.org/library/uuid.html
.. __: http://www.postgresql.org/docs/8.4/static/datatype-uuid.html
"""
def __init__(self, uuid):
self._uuid = uuid
def prepare(self, conn):
pass
def getquoted(self):
return "'"+str(self._uuid)+"'::uuid"
__str__ = getquoted
def register_uuid(oids=None, conn_or_curs=None):
"""Create the UUID type and an uuid.UUID adapter."""
if not oids:
oid1 = 2950
oid2 = 2951
elif type(oids) == list:
oid1, oid2 = oids
else:
oid1 = oids
oid2 = 2951
def parseUUIDARRAY(data, cursor):
if data is None:
return None
elif data == '{}':
return []
else:
return [((len(x) > 0 and x != 'NULL') and uuid.UUID(x) or None)
for x in data[1:-1].split(',')]
_ext.UUID = _ext.new_type((oid1, ), "UUID",
lambda data, cursor: data and uuid.UUID(data) or None)
_ext.UUIDARRAY = _ext.new_type((oid2,), "UUID[]", parseUUIDARRAY)
_ext.register_type(_ext.UUID, conn_or_curs)
_ext.register_type(_ext.UUIDARRAY, conn_or_curs)
_ext.register_adapter(uuid.UUID, UUID_adapter)
return _ext.UUID
except ImportError, e:
def register_uuid(oid=None):
"""Create the UUID type and an uuid.UUID adapter.
This is a fake function that will always raise an error because the
import of the uuid module failed.
"""
raise e
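# Illustrative usage sketch (not part of the original module): once
# register_uuid() has been called, uuid columns come back as uuid.UUID
# instances. The DSN is hypothetical.
def _example_register_uuid(dsn="dbname=test user=postgres"):
    conn = psycopg2.connect(dsn)
    register_uuid(conn_or_curs=conn)
    curs = conn.cursor()
    curs.execute("SELECT 'a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11'::uuid")
    return curs.fetchone()[0]     # a uuid.UUID instance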
# a type, dbtype and adapter for PostgreSQL inet type
class Inet(object):
"""Wrap a string to allow for correct SQL-quoting of inet values.
Note that this adapter does NOT check the passed value to make
sure it really is an inet-compatible address but DOES call adapt()
on it to make sure it is impossible to execute an SQL-injection
by passing an evil value to the initializer.
"""
def __init__(self, addr):
self.addr = addr
def __repr__(self):
return "%s(%r)" % (self.__class__.__name__, self.addr)
def prepare(self, conn):
self._conn = conn
def getquoted(self):
obj = _A(self.addr)
if hasattr(obj, 'prepare'):
obj.prepare(self._conn)
return obj.getquoted()+"::inet"
def __conform__(self, foo):
if foo is _ext.ISQLQuote:
return self
def __str__(self):
return str(self.addr)
def register_inet(oid=None, conn_or_curs=None):
"""Create the INET type and an Inet adapter."""
if not oid: oid = 869
_ext.INET = _ext.new_type((oid, ), "INET",
lambda data, cursor: data and Inet(data) or None)
_ext.register_type(_ext.INET, conn_or_curs)
return _ext.INET
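# Illustrative usage sketch (not part of the original module): with the INET
# typecaster registered, inet columns are wrapped in Inet objects. The DSN
# is hypothetical.
def _example_register_inet(dsn="dbname=test user=postgres"):
    conn = psycopg2.connect(dsn)
    register_inet(conn_or_curs=conn)
    curs = conn.cursor()
    curs.execute("SELECT '192.168.0.1'::inet")
    return str(curs.fetchone()[0])    # the wrapped address as text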
def register_tstz_w_secs(oids=None, conn_or_curs=None):
"""The function used to register an alternate type caster for
:sql:`TIMESTAMP WITH TIME ZONE` to deal with historical time zones with
seconds in the UTC offset.
These are now correctly handled by the default type caster, so currently
the function doesn't do anything.
"""
warnings.warn("deprecated", DeprecationWarning)
import select
from psycopg2.extensions import POLL_OK, POLL_READ, POLL_WRITE
from psycopg2 import OperationalError
def wait_select(conn):
"""Wait until a connection or cursor has data available.
The function is an example of a wait callback to be registered with
`~psycopg2.extensions.set_wait_callback()`. This function uses `!select()`
to wait for data available.
"""
while 1:
state = conn.poll()
if state == POLL_OK:
break
elif state == POLL_READ:
select.select([conn.fileno()], [], [])
elif state == POLL_WRITE:
select.select([], [conn.fileno()], [])
else:
raise OperationalError("bad state from poll: %s" % state)
class HstoreAdapter(object):
"""Adapt a Python dict to the hstore syntax."""
def __init__(self, wrapped):
self.wrapped = wrapped
def prepare(self, conn):
self.conn = conn
# use an old-style getquoted implementation if required
if conn.server_version < 90000:
self.getquoted = self._getquoted_8
def _getquoted_8(self):
"""Use the operators available in PG pre-9.0."""
if not self.wrapped:
return "''::hstore"
adapt = _ext.adapt
rv = []
for k, v in self.wrapped.iteritems():
k = adapt(k)
k.prepare(self.conn)
k = k.getquoted()
if v is not None:
v = adapt(v)
v.prepare(self.conn)
v = v.getquoted()
else:
v = 'NULL'
rv.append("(%s => %s)" % (k, v))
return "(" + '||'.join(rv) + ")"
def _getquoted_9(self):
"""Use the hstore(text[], text[]) function."""
if not self.wrapped:
return "''::hstore"
k = _ext.adapt(self.wrapped.keys())
k.prepare(self.conn)
v = _ext.adapt(self.wrapped.values())
v.prepare(self.conn)
return "hstore(%s, %s)" % (k.getquoted(), v.getquoted())
getquoted = _getquoted_9
_re_hstore = regex.compile(r"""
# hstore key:
# a string of normal or escaped chars
"((?: [^"\\] | \\. )*)"
\s*=>\s* # hstore value
(?:
            NULL # the value can be null - not caught
# or a quoted string like the key
| "((?: [^"\\] | \\. )*)"
)
(?:\s*,\s*|$) # pairs separated by comma or end of string.
""", regex.VERBOSE)
# backslash decoder
_bsdec = codecs.getdecoder("string_escape")
def parse(self, s, cur, _decoder=_bsdec):
"""Parse an hstore representation in a Python string.
The hstore is represented as something like::
"a"=>"1", "b"=>"2"
with backslash-escaped strings.
"""
if s is None:
return None
rv = {}
start = 0
for m in self._re_hstore.finditer(s):
if m is None or m.start() != start:
raise psycopg2.InterfaceError(
"error parsing hstore pair at char %d" % start)
k = _decoder(m.group(1))[0]
v = m.group(2)
if v is not None:
v = _decoder(v)[0]
rv[k] = v
start = m.end()
if start < len(s):
raise psycopg2.InterfaceError(
"error parsing hstore: unparsed data after char %d" % start)
return rv
parse = classmethod(parse)
def parse_unicode(self, s, cur):
"""Parse an hstore returning unicode keys and values."""
codec = codecs.getdecoder(_ext.encodings[cur.connection.encoding])
bsdec = self._bsdec
decoder = lambda s: codec(bsdec(s)[0])
return self.parse(s, cur, _decoder=decoder)
parse_unicode = classmethod(parse_unicode)
@classmethod
def get_oids(self, conn_or_curs):
"""Return the oid of the hstore and hstore[] types.
Return None if hstore is not available.
"""
if hasattr(conn_or_curs, 'execute'):
conn = conn_or_curs.connection
curs = conn_or_curs
else:
conn = conn_or_curs
curs = conn_or_curs.cursor()
# Store the transaction status of the connection to revert it after use
conn_status = conn.status
# column typarray not available before PG 8.3
typarray = conn.server_version >= 80300 and "typarray" or "NULL"
# get the oid for the hstore
curs.execute("""\
SELECT t.oid, %s
FROM pg_type t JOIN pg_namespace ns
ON typnamespace = ns.oid
WHERE typname = 'hstore' and nspname = 'public';
""" % typarray)
oids = curs.fetchone()
# revert the status of the connection as before the command
if (conn_status != _ext.STATUS_IN_TRANSACTION
and conn.isolation_level != _ext.ISOLATION_LEVEL_AUTOCOMMIT):
conn.rollback()
return oids
def register_hstore(conn_or_curs, globally=False, unicode=False):
"""Register adapter and typecaster for `dict`\-\ |hstore| conversions.
The function must receive a connection or cursor as the |hstore| oid is
different in each database. The typecaster will normally be registered
only on the connection or cursor passed as argument. If your application
uses a single database you can pass *globally*\=True to have the typecaster
registered on all the connections.
By default the returned dicts will have `str` objects as keys and values:
use *unicode*\=True to return `unicode` objects instead. When adapting a
dictionary both `str` and `unicode` keys and values are handled (the
`unicode` values will be converted according to the current
`~connection.encoding`).
The |hstore| contrib module must be already installed in the database
(executing the ``hstore.sql`` script in your ``contrib`` directory).
Raise `~psycopg2.ProgrammingError` if the type is not found.
"""
oids = HstoreAdapter.get_oids(conn_or_curs)
if oids is None:
raise psycopg2.ProgrammingError(
"hstore type not found in the database. "
"please install it from your 'contrib/hstore.sql' file")
# create and register the typecaster
if unicode:
cast = HstoreAdapter.parse_unicode
else:
cast = HstoreAdapter.parse
HSTORE = _ext.new_type((oids[0],), "HSTORE", cast)
_ext.register_type(HSTORE, not globally and conn_or_curs or None)
_ext.register_adapter(dict, HstoreAdapter)
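# Illustrative usage sketch (not part of the original module): after
# register_hstore() the hstore type maps to plain Python dicts in both
# directions. The DSN is hypothetical and the hstore extension must be
# installed in the target database.
def _example_register_hstore(dsn="dbname=test user=postgres"):
    conn = psycopg2.connect(dsn)
    register_hstore(conn)
    curs = conn.cursor()
    curs.execute("SELECT %s::hstore", ({'a': '1', 'b': None},))
    return curs.fetchone()[0]     # {'a': '1', 'b': None}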
__all__ = filter(lambda k: not k.startswith('_'), locals().keys())
| mpl-2.0 | -8,806,729,448,066,326,000 | 30.852861 | 93 | 0.59799 | false |
ericfrederich/graphviz | setup.py | 2 | 1298 | # setup.py
from setuptools import setup, find_packages
setup(
name='graphviz',
version='0.4.6.dev0',
author='Sebastian Bank',
author_email='[email protected]',
description='Simple Python interface for Graphviz',
keywords='graph visualization dot render',
license='MIT',
url='http://github.com/xflr6/graphviz',
packages=find_packages(),
extras_require={
'test': ['unittest2', 'nose', 'coverage', 'flake8', 'pep8-naming'],
'dev': ['wheel'],
},
platforms='any',
long_description=open('README.rst').read(),
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Scientific/Engineering :: Visualization',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
| mit | -6,055,215,827,509,615,000 | 34.081081 | 75 | 0.602465 | false |
Lilywei123/tempest | tempest/stress/actions/volume_attach_verify.py | 4 | 9860 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tempest.common.utils import data_utils
from tempest.common.utils.linux import remote_client
from tempest import config
import tempest.stress.stressaction as stressaction
import tempest.test
import re
CONF = config.CONF
class VolumeVerifyStress(stressaction.StressAction):
def _create_keypair(self):
keyname = data_utils.rand_name("key")
_, self.key = self.manager.keypairs_client.create_keypair(keyname)
def _delete_keypair(self):
self.manager.keypairs_client.delete_keypair(self.key['name'])
def _create_vm(self):
self.name = name = data_utils.rand_name("instance")
servers_client = self.manager.servers_client
self.logger.info("creating %s" % name)
vm_args = self.vm_extra_args.copy()
vm_args['security_groups'] = [self.sec_grp]
vm_args['key_name'] = self.key['name']
_, server = servers_client.create_server(name, self.image,
self.flavor,
**vm_args)
self.server_id = server['id']
self.manager.servers_client.wait_for_server_status(self.server_id,
'ACTIVE')
def _destroy_vm(self):
self.logger.info("deleting server: %s" % self.server_id)
self.manager.servers_client.delete_server(self.server_id)
self.manager.servers_client.wait_for_server_termination(self.server_id)
self.logger.info("deleted server: %s" % self.server_id)
def _create_sec_group(self):
sec_grp_cli = self.manager.security_groups_client
s_name = data_utils.rand_name('sec_grp-')
s_description = data_utils.rand_name('desc-')
_, self.sec_grp = sec_grp_cli.create_security_group(s_name,
s_description)
create_rule = sec_grp_cli.create_security_group_rule
create_rule(self.sec_grp['id'], 'tcp', 22, 22)
create_rule(self.sec_grp['id'], 'icmp', -1, -1)
def _destroy_sec_grp(self):
sec_grp_cli = self.manager.security_groups_client
sec_grp_cli.delete_security_group(self.sec_grp['id'])
def _create_floating_ip(self):
floating_cli = self.manager.floating_ips_client
_, self.floating = floating_cli.create_floating_ip(self.floating_pool)
def _destroy_floating_ip(self):
cli = self.manager.floating_ips_client
cli.delete_floating_ip(self.floating['id'])
cli.wait_for_resource_deletion(self.floating['id'])
self.logger.info("Deleted Floating IP %s", str(self.floating['ip']))
def _create_volume(self):
name = data_utils.rand_name("volume")
self.logger.info("creating volume: %s" % name)
volumes_client = self.manager.volumes_client
_, self.volume = volumes_client.create_volume(
size=1,
display_name=name)
volumes_client.wait_for_volume_status(self.volume['id'],
'available')
self.logger.info("created volume: %s" % self.volume['id'])
def _delete_volume(self):
self.logger.info("deleting volume: %s" % self.volume['id'])
volumes_client = self.manager.volumes_client
volumes_client.delete_volume(self.volume['id'])
volumes_client.wait_for_resource_deletion(self.volume['id'])
self.logger.info("deleted volume: %s" % self.volume['id'])
def _wait_disassociate(self):
cli = self.manager.floating_ips_client
def func():
_, floating = cli.get_floating_ip_details(self.floating['id'])
return floating['instance_id'] is None
if not tempest.test.call_until_true(func, CONF.compute.build_timeout,
CONF.compute.build_interval):
raise RuntimeError("IP disassociate timeout!")
def new_server_ops(self):
self._create_vm()
cli = self.manager.floating_ips_client
cli.associate_floating_ip_to_server(self.floating['ip'],
self.server_id)
if self.ssh_test_before_attach and self.enable_ssh_verify:
self.logger.info("Scanning for block devices via ssh on %s"
% self.server_id)
self.part_wait(self.detach_match_count)
def setUp(self, **kwargs):
"""Note able configuration combinations:
Closest options to the test_stamp_pattern:
new_server = True
new_volume = True
enable_ssh_verify = True
ssh_test_before_attach = False
Just attaching:
new_server = False
new_volume = False
enable_ssh_verify = True
ssh_test_before_attach = True
Mostly API load by repeated attachment:
new_server = False
new_volume = False
enable_ssh_verify = False
ssh_test_before_attach = False
Minimal Nova load, but cinder load not decreased:
new_server = False
new_volume = True
enable_ssh_verify = True
ssh_test_before_attach = True
"""
self.image = CONF.compute.image_ref
self.flavor = CONF.compute.flavor_ref
self.vm_extra_args = kwargs.get('vm_extra_args', {})
self.floating_pool = kwargs.get('floating_pool', None)
self.new_volume = kwargs.get('new_volume', True)
self.new_server = kwargs.get('new_server', False)
self.enable_ssh_verify = kwargs.get('enable_ssh_verify', True)
self.ssh_test_before_attach = kwargs.get('ssh_test_before_attach',
False)
self.part_line_re = re.compile(kwargs.get('part_line_re', '.*vd.*'))
self.detach_match_count = kwargs.get('detach_match_count', 1)
self.attach_match_count = kwargs.get('attach_match_count', 2)
self.part_name = kwargs.get('part_name', '/dev/vdc')
self._create_floating_ip()
self._create_sec_group()
self._create_keypair()
private_key = self.key['private_key']
username = CONF.compute.image_ssh_user
self.remote_client = remote_client.RemoteClient(self.floating['ip'],
username,
pkey=private_key)
if not self.new_volume:
self._create_volume()
if not self.new_server:
self.new_server_ops()
    # now we just check whether the number of partitions increased or decreased
def part_wait(self, num_match):
def _part_state():
self.partitions = self.remote_client.get_partitions().split('\n')
matching = 0
for part_line in self.partitions[1:]:
if self.part_line_re.match(part_line):
matching += 1
return matching == num_match
if tempest.test.call_until_true(_part_state,
CONF.compute.build_timeout,
CONF.compute.build_interval):
return
else:
raise RuntimeError("Unexpected partitions: %s",
str(self.partitions))
def run(self):
if self.new_server:
self.new_server_ops()
if self.new_volume:
self._create_volume()
servers_client = self.manager.servers_client
self.logger.info("attach volume (%s) to vm %s" %
(self.volume['id'], self.server_id))
servers_client.attach_volume(self.server_id,
self.volume['id'],
self.part_name)
self.manager.volumes_client.wait_for_volume_status(self.volume['id'],
'in-use')
if self.enable_ssh_verify:
self.logger.info("Scanning for new block device on %s"
% self.server_id)
self.part_wait(self.attach_match_count)
servers_client.detach_volume(self.server_id,
self.volume['id'])
self.manager.volumes_client.wait_for_volume_status(self.volume['id'],
'available')
if self.enable_ssh_verify:
self.logger.info("Scanning for block device disapperance on %s"
% self.server_id)
self.part_wait(self.detach_match_count)
if self.new_volume:
self._delete_volume()
if self.new_server:
self._destroy_vm()
def tearDown(self):
cli = self.manager.floating_ips_client
cli.disassociate_floating_ip_from_server(self.floating['ip'],
self.server_id)
self._wait_disassociate()
if not self.new_server:
self._destroy_vm()
self._delete_keypair()
self._destroy_floating_ip()
self._destroy_sec_grp()
if not self.new_volume:
self._delete_volume()
| apache-2.0 | 4,774,138,606,945,633,000 | 43.017857 | 79 | 0.559939 | false |
wjchen84/rapprentice | rapprentice/plotting_openrave.py | 3 | 1313 | import numpy as np
def draw_grid(env, f, mins, maxes, xres = .1, yres = .1, zres = .04):
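    """Draw a coarse reference grid over the box [mins, maxes].
    Each grid line is sampled with ``nfine`` points and mapped through ``f``
    (e.g. a warping function) before being drawn; ``zres == -1`` collapses
    the grid onto a single mid-height plane and the return value is the list
    of OpenRAVE drawing handles.
    """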
xmin, ymin, zmin = mins
xmax, ymax, zmax = maxes
nfine = 30
xcoarse = np.arange(xmin, xmax, xres)
ycoarse = np.arange(ymin, ymax, yres)
if zres == -1: zcoarse = [(zmin+zmax)/2.]
else: zcoarse = np.arange(zmin, zmax, zres)
xfine = np.linspace(xmin, xmax, nfine)
yfine = np.linspace(ymin, ymax, nfine)
zfine = np.linspace(zmin, zmax, nfine)
lines = []
if len(zcoarse) > 1:
for x in xcoarse:
for y in ycoarse:
xyz = np.zeros((nfine, 3))
xyz[:,0] = x
xyz[:,1] = y
xyz[:,2] = zfine
lines.append(f(xyz))
for y in ycoarse:
for z in zcoarse:
xyz = np.zeros((nfine, 3))
xyz[:,0] = xfine
xyz[:,1] = y
xyz[:,2] = z
lines.append(f(xyz))
for z in zcoarse:
for x in xcoarse:
xyz = np.zeros((nfine, 3))
xyz[:,0] = x
xyz[:,1] = yfine
xyz[:,2] = z
lines.append(f(xyz))
handles = []
for line in lines:
handles.append(env.drawlinestrip(line,1,(1,1,0,1)))
return handles
| bsd-2-clause | 6,836,653,840,091,025,000 | 26.354167 | 69 | 0.460777 | false |
dhermes/google-cloud-python | spanner/google/cloud/spanner_v1/session.py | 2 | 12382 | # Copyright 2016 Google LLC All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper for Cloud Spanner Session objects."""
from functools import total_ordering
import time
from google.rpc.error_details_pb2 import RetryInfo
# pylint: disable=ungrouped-imports
from google.api_core.exceptions import Aborted, GoogleAPICallError, NotFound
import google.api_core.gapic_v1.method
from google.cloud.spanner_v1._helpers import _metadata_with_prefix
from google.cloud.spanner_v1.batch import Batch
from google.cloud.spanner_v1.snapshot import Snapshot
from google.cloud.spanner_v1.transaction import Transaction
# pylint: enable=ungrouped-imports
DEFAULT_RETRY_TIMEOUT_SECS = 30
"""Default timeout used by :meth:`Session.run_in_transaction`."""
@total_ordering
class Session(object):
"""Representation of a Cloud Spanner Session.
We can use a :class:`Session` to:
* :meth:`create` the session
* Use :meth:`exists` to check for the existence of the session
* :meth:`drop` the session
:type database: :class:`~google.cloud.spanner_v1.database.Database`
:param database: The database to which the session is bound.
:type labels: dict (str -> str)
:param labels: (Optional) User-assigned labels for the session.
"""
_session_id = None
_transaction = None
def __init__(self, database, labels=None):
self._database = database
if labels is None:
labels = {}
self._labels = labels
def __lt__(self, other):
return self._session_id < other._session_id
@property
def session_id(self):
"""Read-only ID, set by the back-end during :meth:`create`."""
return self._session_id
@property
def labels(self):
"""User-assigned labels for the session.
:rtype: dict (str -> str)
        :returns: the labels dict (empty if no labels were assigned).
"""
return self._labels
@property
def name(self):
"""Session name used in requests.
.. note::
This property will not change if ``session_id`` does not, but the
return value is not cached.
The session name is of the form
``"projects/../instances/../databases/../sessions/{session_id}"``
:rtype: str
:returns: The session name.
:raises ValueError: if session is not yet created
"""
if self._session_id is None:
raise ValueError("No session ID set by back-end")
return self._database.name + "/sessions/" + self._session_id
def create(self):
"""Create this session, bound to its database.
See
https://cloud.google.com/spanner/reference/rpc/google.spanner.v1#google.spanner.v1.Spanner.CreateSession
:raises: :exc:`ValueError` if :attr:`session_id` is already set.
"""
if self._session_id is not None:
raise ValueError("Session ID already set by back-end")
api = self._database.spanner_api
metadata = _metadata_with_prefix(self._database.name)
kw = {}
if self._labels:
kw = {"session": {"labels": self._labels}}
session_pb = api.create_session(self._database.name, metadata=metadata, **kw)
self._session_id = session_pb.name.split("/")[-1]
def exists(self):
"""Test for the existence of this session.
See
https://cloud.google.com/spanner/reference/rpc/google.spanner.v1#google.spanner.v1.Spanner.GetSession
:rtype: bool
:returns: True if the session exists on the back-end, else False.
"""
if self._session_id is None:
return False
api = self._database.spanner_api
metadata = _metadata_with_prefix(self._database.name)
try:
api.get_session(self.name, metadata=metadata)
except NotFound:
return False
return True
def delete(self):
"""Delete this session.
See
https://cloud.google.com/spanner/reference/rpc/google.spanner.v1#google.spanner.v1.Spanner.GetSession
:raises ValueError: if :attr:`session_id` is not already set.
:raises NotFound: if the session does not exist
"""
if self._session_id is None:
raise ValueError("Session ID not set by back-end")
api = self._database.spanner_api
metadata = _metadata_with_prefix(self._database.name)
api.delete_session(self.name, metadata=metadata)
def snapshot(self, **kw):
"""Create a snapshot to perform a set of reads with shared staleness.
See
https://cloud.google.com/spanner/reference/rpc/google.spanner.v1#google.spanner.v1.TransactionOptions.ReadOnly
:type kw: dict
:param kw: Passed through to
:class:`~google.cloud.spanner_v1.snapshot.Snapshot` ctor.
:rtype: :class:`~google.cloud.spanner_v1.snapshot.Snapshot`
:returns: a snapshot bound to this session
:raises ValueError: if the session has not yet been created.
"""
if self._session_id is None:
raise ValueError("Session has not been created.")
return Snapshot(self, **kw)
def read(self, table, columns, keyset, index="", limit=0):
"""Perform a ``StreamingRead`` API request for rows in a table.
:type table: str
:param table: name of the table from which to fetch data
:type columns: list of str
:param columns: names of columns to be retrieved
:type keyset: :class:`~google.cloud.spanner_v1.keyset.KeySet`
:param keyset: keys / ranges identifying rows to be retrieved
:type index: str
:param index: (Optional) name of index to use, rather than the
table's primary key
:type limit: int
:param limit: (Optional) maximum number of rows to return
:rtype: :class:`~google.cloud.spanner_v1.streamed.StreamedResultSet`
:returns: a result set instance which can be used to consume rows.
"""
return self.snapshot().read(table, columns, keyset, index, limit)
def execute_sql(
self,
sql,
params=None,
param_types=None,
query_mode=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
):
"""Perform an ``ExecuteStreamingSql`` API request.
:type sql: str
:param sql: SQL query statement
:type params: dict, {str -> column value}
:param params: values for parameter replacement. Keys must match
the names used in ``sql``.
:type param_types:
dict, {str -> :class:`google.spanner.v1.type_pb2.TypeCode`}
:param param_types: (Optional) explicit types for one or more param
values; overrides default type detection on the
back-end.
:type query_mode:
:class:`google.spanner.v1.spanner_pb2.ExecuteSqlRequest.QueryMode`
:param query_mode: Mode governing return of results / query plan. See
https://cloud.google.com/spanner/reference/rpc/google.spanner.v1#google.spanner.v1.ExecuteSqlRequest.QueryMode1
:rtype: :class:`~google.cloud.spanner_v1.streamed.StreamedResultSet`
:returns: a result set instance which can be used to consume rows.
"""
return self.snapshot().execute_sql(
sql, params, param_types, query_mode, retry=retry, timeout=timeout
)
def batch(self):
"""Factory to create a batch for this session.
:rtype: :class:`~google.cloud.spanner_v1.batch.Batch`
:returns: a batch bound to this session
:raises ValueError: if the session has not yet been created.
"""
if self._session_id is None:
raise ValueError("Session has not been created.")
return Batch(self)
def transaction(self):
"""Create a transaction to perform a set of reads with shared staleness.
:rtype: :class:`~google.cloud.spanner_v1.transaction.Transaction`
:returns: a transaction bound to this session
:raises ValueError: if the session has not yet been created.
"""
if self._session_id is None:
raise ValueError("Session has not been created.")
if self._transaction is not None:
self._transaction._rolled_back = True
del self._transaction
txn = self._transaction = Transaction(self)
return txn
def run_in_transaction(self, func, *args, **kw):
"""Perform a unit of work in a transaction, retrying on abort.
:type func: callable
:param func: takes a required positional argument, the transaction,
and additional positional / keyword arguments as supplied
by the caller.
:type args: tuple
:param args: additional positional arguments to be passed to ``func``.
:type kw: dict
:param kw: optional keyword arguments to be passed to ``func``.
If passed, "timeout_secs" will be removed and used to
override the default timeout.
:rtype: Any
:returns: The return value of ``func``.
:raises Exception:
            reraises any non-ABORT exceptions raised by ``func``.
"""
deadline = time.time() + kw.pop("timeout_secs", DEFAULT_RETRY_TIMEOUT_SECS)
while True:
if self._transaction is None:
txn = self.transaction()
else:
txn = self._transaction
if txn._transaction_id is None:
txn.begin()
try:
return_value = func(txn, *args, **kw)
except Aborted as exc:
del self._transaction
_delay_until_retry(exc, deadline)
continue
except GoogleAPICallError:
del self._transaction
raise
except Exception:
txn.rollback()
raise
try:
txn.commit()
except Aborted as exc:
del self._transaction
_delay_until_retry(exc, deadline)
except GoogleAPICallError:
del self._transaction
raise
else:
return return_value
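# Illustrative usage sketch (not part of the original module): a minimal
# unit of work retried through run_in_transaction. The table, columns and
# the way ``database`` is obtained are hypothetical.
def _example_run_in_transaction(database):
    def unit_of_work(txn):
        txn.insert('users', columns=['id', 'name'], values=[(1, 'Alice')])
    session = database.session()
    session.create()
    try:
        session.run_in_transaction(unit_of_work)
    finally:
        session.delete()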
# pylint: disable=misplaced-bare-raise
#
# Rationale: this function factors out complex shared deadline / retry
# handling from two `except:` clauses.
def _delay_until_retry(exc, deadline):
"""Helper for :meth:`Session.run_in_transaction`.
Detect retryable abort, and impose server-supplied delay.
:type exc: :class:`google.api_core.exceptions.Aborted`
:param exc: exception for aborted transaction
:type deadline: float
:param deadline: maximum timestamp to continue retrying the transaction.
"""
cause = exc.errors[0]
now = time.time()
if now >= deadline:
raise
delay = _get_retry_delay(cause)
if delay is not None:
if now + delay > deadline:
raise
time.sleep(delay)
# pylint: enable=misplaced-bare-raise
def _get_retry_delay(cause):
"""Helper for :func:`_delay_until_retry`.
    :type cause: :class:`grpc.Call`
    :param cause: exception for aborted transaction
:rtype: float
:returns: seconds to wait before retrying the transaction.
"""
metadata = dict(cause.trailing_metadata())
retry_info_pb = metadata.get("google.rpc.retryinfo-bin")
if retry_info_pb is not None:
retry_info = RetryInfo()
retry_info.ParseFromString(retry_info_pb)
nanos = retry_info.retry_delay.nanos
return retry_info.retry_delay.seconds + nanos / 1.0e9
| apache-2.0 | 8,085,130,178,526,806,000 | 32.646739 | 123 | 0.620417 | false |
SyntaxVoid/PyFusionGUI | GUI/ClusteringWindow.py | 1 | 6495 | # tkinter
try: # Will work with python 3
import tkinter as tk
except ImportError: # Will work with python 2
import Tkinter as tk
from CONSTANTS import *
import subprocess
from Utilities import jtools as jt
class ClusteringWindow:
def __init__(self, master, slurm_start_time, jobid):
# This Toplevel window pops up when the user begins a clustering job. There are several ways to create a
# ClusteringWindow object, but since python doesn't allow for multiple class constructors, we are stuck
# using if/else statements instead.
self.master = master
self.root = tk.Toplevel(master=self.master)
self.root.resizable(height=False, width=False)
self.message_frame = tk.Frame(master=self.root)
self.message_frame.grid(row=0, column=0, sticky=tk.N)
self.buttons_frame = tk.Frame(master=self.root, bd=5, relief=tk.SUNKEN)
self.buttons_frame.grid(row=1, column=0, sticky=tk.N)
self.message = tk.StringVar(master=self.message_frame)
self.label = tk.Label(master=self.message_frame, textvariable=self.message, font=(font_name, 24))
self.label.grid(row=0, column=0, sticky=tk.N)
self.root.grab_set()
self.root.wm_protocol("WM_DELETE_WINDOW", self.verify_cancel)
self.root.bind("<<clustering_failed>>", self.clustering_failed)
self.root.bind("<<slurm_clustering_complete>>", self.slurm_clustering_complete)
self.default_wait_time = 5 # Wait 5 seconds before refreshing squeue status
self.slurm_start_time = slurm_start_time
self.jobid = jobid
self.root.title("Clustering in Progress")
self.ANobj_file = IRIS_CSCRATCH_DIR+self.slurm_start_time+".ANobj"
self.error_file = os.path.join(SLURM_DIR, "errors.txt")
self._cur = self.default_wait_time
self.cancel_button = tk.Button(master=self.message_frame, text="Cancel", command=self.verify_cancel)
self.cancel_button.grid(row=1, column=0, sticky=tk.N)
self.hint_label = tk.Label(master=self.message_frame, text="If the window freezes, it is loading\n"
"the finished Analysis object.",
font=(font_name, 12))
self.hint_label.grid(row=2, column=0, sticky=tk.N)
self.total_time = 0
self.message.set("Waiting for worker\nnode to complete\njob # {}.\n"
"Checking again in:\n{} seconds.\n"
"Total time elapsed:\n{} seconds.".format(self.jobid, self._cur, self.total_time))
return
def start(self):
self.root.after(1000, self.countdown)
return
def verify_cancel(self):
win = tk.Toplevel(master=self.root)
win.resizable(width=False, height=False)
win.grab_set()
label = tk.Label(master=win, text="Do you really wish\nto close?", font=(font_name, 18))
label.grid(row=0, column=0, columnspan=2, sticky=tk.N)
yes = tk.Button(master=win, text="Yes", font=(font_name, 18), command=self.yes_cancel)
yes.grid(row=1, column=0, sticky=tk.N)
no = tk.Button(master=win, text="No", font=(font_name, 18), command=win.destroy)
no.grid(row=1, column=1, sticky=tk.N)
return
def yes_cancel(self):
subprocess.check_output("scancel {}".format(self.jobid), shell=True)
self.root.destroy()
return
def countdown(self):
self._cur -= 1
self.total_time += 1
if self._cur <= 0:
sjobexitmod_output = subprocess.check_output("sjobexitmod -l {}".format(self.jobid), shell=True)
exit_state = jt.get_slurm_exit_state(sjobexitmod_output)
if exit_state == "PENDING" or exit_state == "assigned":
self._cur = self.default_wait_time
elif exit_state == "RUNNING":
self._cur = self.default_wait_time
elif exit_state == "COMPLETED":
self.message.set("Clustering is complete!\nPlease wait while the\nthe object is loaded.\n"
"Total time elapsed:\n{} seconds".format(self.total_time))
self.root.event_generate("<<slurm_clustering_complete>>", when="tail")
return
elif exit_state == "FAILED":
self.root.event_generate("<<clustering_failed>>", when="tail")
return
elif exit_state == "CANCELLED+":
self.root.event_generate("<<clustering_failed>>", when="tail")
else:
print("UNKNOWN EXIT STATE: ({})".format(exit_state))
self.message.set("Waiting for worker\nnode to complete\njob # {}.\n"
"Checking again in:\n{} seconds.\n"
"Total time elapsed:\n{} seconds.".format(self.jobid, self._cur, self.total_time))
self.root.after(1000, self.countdown)
return
def slurm_clustering_complete(self, e):
self.root.title("SLURM Clustering Complete!")
self.root.wm_protocol("WM_DELETE_WINDOW", self.root.destroy)
# self.root.geometry("330x320")
self.message.set("SLURM clustering complete!\nYour Analysis object\nwas saved to:\n{}\n"
"Total time elapsed: {} seconds"
.format(jt.break_path(self.ANobj_file, 23), self.total_time))
self.cancel_button.destroy()
ok_button = tk.Button(master=self.root, text="OK", command=self.root.destroy, font=(font_name, 18))
ok_button.grid(row=1, column=0)
self.master.event_generate("<<slurm_clustering_complete>>", when="tail")
return
def clustering_failed(self, e):
self.root.title("Clustering Failed!")
self.root.wm_protocol("WM_DELETE_WINDOW", self.root.destroy)
self.message.set("Clustering Failed! Check\n{}\nfor more details."
.format(jt.break_path(os.path.join(IRIS_CSCRATCH_DIR,
"PyFusionGUI-{}.out".format(self.jobid)), 24)))
self.label.config(fg="red")
self.cancel_button.destroy()
ok_button = tk.Button(master=self.buttons_frame,
text="OK", font=(font_name, 18),
command=self.root.destroy)
ok_button.grid(row=0, column=0, sticky=tk.N)
self.master.event_generate("<<clustering_failed>>", when="tail")
return
| gpl-3.0 | -1,442,307,806,015,061,500 | 50.96 | 112 | 0.598152 | false |
boto/boto3 | tests/functional/docs/test_s3.py | 1 | 1421 | # Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from tests.functional.docs import BaseDocsFunctionalTests
from boto3.session import Session
from boto3.docs.service import ServiceDocumenter
class TestS3Customizations(BaseDocsFunctionalTests):
def setUp(self):
self.documenter = ServiceDocumenter(
's3', session=Session(region_name='us-east-1'))
self.generated_contents = self.documenter.document_service()
self.generated_contents = self.generated_contents.decode('utf-8')
def test_file_transfer_methods_are_documented(self):
self.assert_contains_lines_in_order([
'.. py:class:: S3.Client',
' * :py:meth:`~S3.Client.download_file`',
' * :py:meth:`~S3.Client.upload_file`',
' .. py:method:: download_file(',
' .. py:method:: upload_file('],
self.generated_contents
)
| apache-2.0 | -6,142,008,855,066,756,000 | 40.794118 | 73 | 0.684025 | false |
shiehinms/vminspector | util.py | 1 | 3436 | import sys
import requests
from time import time
from functools import wraps
from urlparse import urlparse
from os import unlink, makedirs
from os.path import isdir, exists
from optparse import OptionParser
from azure.storage import BlobService
VERSION = 'v1.0.0'
USAGE = 'usage: python %prog -u url -k account_key -p path -f filename\n' \
'*(Required field)'
def print_warning():
"""TODO: Docstring for print_warning.
:returns: TODO
"""
print 'Extension and Filename are mutually exclusive.'
return 1
def get_options():
"""TODO: Docstring for get_options.
:returns: TODO
"""
parser = OptionParser(usage=USAGE, version=VERSION)
parser.add_option('-u', '--url', action='store', type='string',
help='Url of the vhd *', dest='url', default='')
parser.add_option('-k', '--key', action='store', type='string',
help='Account Key', dest='account_key', default='')
parser.add_option('-f', '--file', action='store', type='string',
help='File name', dest='filename', default='')
parser.add_option('-p', '--path', action='store', type='string',
help='Searching path *', dest='path', default='/')
parser.add_option('-e', '--extension', action='store', type='string',
help='Extension', dest='extension', default='')
parser.add_option('-t', '--type', action='store', type='int',
help='EXT2/3/4; 2,3,4', dest='type', default='4')
parser.add_option('--ls', action='store_true',
help='List the dir', dest='ls', default=False)
(options, args) = parser.parse_args()
len(sys.argv) == 1 and exit(parser.print_help())
options.extension and options.filename and exit(print_warning())
tmp = urlparse(options.url)
options.account_name = tmp.netloc.split('.')[0]
options.container = tmp.path.split('/')[1]
options.vhd = tmp.path.split('/')[2]
options.host_base = tmp.netloc[tmp.netloc.find('.'):]
if options.account_key:
options.blob_service = BlobService(options.account_name,
options.account_key,
host_base=options.host_base)
options.blob_service._httpclient.request_session = requests.Session()
else:
options.blob_service = None
options.path_list = split_path(options.path)
return (options, args)
def log_time(fn):
"""TODO: Docstring for log_time.
:fn: TODO
:returns: TODO
"""
@wraps(fn)
def wrapper(*args, **kwargs):
start_time = time()
result = fn(*args, **kwargs)
print '%s -> Time used : %d\n' % (fn.__name__, time() - start_time)
return result
return wrapper
def embed_params(**kwargs):
"""TODO: Docstring for embed_params.
:**kwargs: TODO
:returns: TODO
"""
def decorator(fn):
@wraps(fn)
def wrapper(*arg):
return fn(*arg, **kwargs)
return wrapper
return decorator
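# Illustrative usage sketch (hypothetical names): embed_params pre-binds
# keyword arguments so callers only pass the positional ones.
def _example_embed_params():
    @embed_params(suffix='.vhd')
    def describe(name, suffix=None):
        return name + suffix
    return describe('disk')   # -> 'disk.vhd'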
def split_path(path):
"""TODO: Docstring for split_path.
:path: TODO
:returns: TODO
"""
item = [x for x in path.split('/') if x != '']
return item
def init_dir(path):
"""TODO: Docstring for init_dir.
:path: TODO
:returns: TODO
"""
if not isdir(path):
exists(path) and unlink(path)
makedirs(path)
| apache-2.0 | 4,956,004,392,117,826,000 | 25.84375 | 77 | 0.578871 | false |
estin/pomp | tests/test_contrib_urllib.py | 1 | 2529 | import logging
from pomp.core.base import BaseCrawler, BaseMiddleware
from pomp.core.engine import Pomp
from pomp.contrib.urllibtools import UrllibDownloader
from pomp.contrib.urllibtools import UrllibAdapterMiddleware
from mockserver import HttpServer, make_sitemap
from tools import DummyCrawler
from tools import RequestResponseMiddleware, CollectRequestResponseMiddleware
logging.basicConfig(level=logging.DEBUG)
class TestContribUrllib(object):
@classmethod
def setup_class(cls):
cls.httpd = HttpServer(sitemap=make_sitemap(level=2, links_on_page=2))
cls.httpd.start()
@classmethod
def teardown_class(cls):
cls.httpd.stop()
def test_urllib_downloader(self):
req_resp_midlleware = RequestResponseMiddleware(
prefix_url=self.httpd.location,
request_factory=lambda x: x,
)
collect_middleware = CollectRequestResponseMiddleware()
downloader = UrllibDownloader()
pomp = Pomp(
downloader=downloader,
middlewares=(
req_resp_midlleware,
UrllibAdapterMiddleware(),
collect_middleware,
),
pipelines=[],
)
class Crawler(DummyCrawler):
ENTRY_REQUESTS = '/root'
pomp.pump(Crawler())
assert \
set([r.url.replace(self.httpd.location, '')
for r in collect_middleware.requests]) == \
set(self.httpd.sitemap.keys())
def test_exception_handling(self):
class CatchException(BaseMiddleware):
def __init__(self):
self.exceptions = []
def process_exception(self, exception, crawler, downloader):
self.exceptions.append(exception)
return exception
class MockCrawler(BaseCrawler):
def next_requests(self, response):
return
def extract_items(self, response):
return
catch_exception_middleware = CatchException()
pomp = Pomp(
downloader=UrllibDownloader(),
middlewares=(
UrllibAdapterMiddleware(),
catch_exception_middleware,
),
pipelines=[],
)
MockCrawler.ENTRY_REQUESTS = [
'https://123.456.789.01:8081/fake_url',
'%s/root' % self.httpd.location,
]
pomp.pump(MockCrawler())
assert len(catch_exception_middleware.exceptions) == 1
| bsd-3-clause | 5,951,056,302,155,495,000 | 26.791209 | 78 | 0.601423 | false |
blitzmann/Pyfa | gui/fitCommands/guiChangeFighterQty.py | 2 | 1171 | import wx
import gui.mainFrame
from gui import globalEvents as GE
from .calc.fitChangeFighterQty import FitChangeFighterQty
from service.fit import Fit
from logbook import Logger
pyfalog = Logger(__name__)
class GuiChangeFighterQty(wx.Command):
def __init__(self, fitID, position, amount=1):
wx.Command.__init__(self, True, "")
self.mainFrame = gui.mainFrame.MainFrame.getInstance()
self.sFit = Fit.getInstance()
self.fitID = fitID
self.position = position
self.amount = amount
self.internal_history = wx.CommandProcessor()
def Do(self):
cmd = FitChangeFighterQty(self.fitID, self.position, self.amount)
if self.internal_history.Submit(cmd):
self.sFit.recalc(self.fitID)
wx.PostEvent(self.mainFrame, GE.FitChanged(fitID=self.fitID))
return True
return False
def Undo(self):
pyfalog.debug("{} Undo()".format(self))
for _ in self.internal_history.Commands:
self.internal_history.Undo()
self.sFit.recalc(self.fitID)
wx.PostEvent(self.mainFrame, GE.FitChanged(fitID=self.fitID))
return True
| gpl-3.0 | 1,605,500,041,040,471,000 | 33.441176 | 73 | 0.657558 | false |
ashhher3/seaborn | seaborn/tests/test_categorical.py | 8 | 75157 | import numpy as np
import pandas as pd
import scipy
from scipy import stats
import matplotlib as mpl
import matplotlib.pyplot as plt
from distutils.version import LooseVersion
pandas_has_categoricals = LooseVersion(pd.__version__) >= "0.15"
import nose.tools as nt
import numpy.testing as npt
from numpy.testing.decorators import skipif
from . import PlotTestCase
from .. import categorical as cat
from .. import palettes
class CategoricalFixture(PlotTestCase):
"""Test boxplot (also base class for things like violinplots)."""
rs = np.random.RandomState(30)
n_total = 60
x = rs.randn(n_total / 3, 3)
x_df = pd.DataFrame(x, columns=pd.Series(list("XYZ"), name="big"))
y = pd.Series(rs.randn(n_total), name="y_data")
g = pd.Series(np.repeat(list("abc"), n_total / 3), name="small")
h = pd.Series(np.tile(list("mn"), n_total / 2), name="medium")
u = pd.Series(np.tile(list("jkh"), n_total / 3))
df = pd.DataFrame(dict(y=y, g=g, h=h, u=u))
x_df["W"] = g
class TestCategoricalPlotter(CategoricalFixture):
def test_wide_df_data(self):
p = cat._CategoricalPlotter()
# Test basic wide DataFrame
p.establish_variables(data=self.x_df)
# Check data attribute
for x, y, in zip(p.plot_data, self.x_df[["X", "Y", "Z"]].values.T):
npt.assert_array_equal(x, y)
# Check semantic attributes
nt.assert_equal(p.orient, "v")
nt.assert_is(p.plot_hues, None)
nt.assert_is(p.group_label, "big")
nt.assert_is(p.value_label, None)
# Test wide dataframe with forced horizontal orientation
p.establish_variables(data=self.x_df, orient="horiz")
nt.assert_equal(p.orient, "h")
        # Test exception by trying to hue-group with a wide dataframe
with nt.assert_raises(ValueError):
p.establish_variables(hue="d", data=self.x_df)
def test_1d_input_data(self):
p = cat._CategoricalPlotter()
# Test basic vector data
x_1d_array = self.x.ravel()
p.establish_variables(data=x_1d_array)
nt.assert_equal(len(p.plot_data), 1)
nt.assert_equal(len(p.plot_data[0]), self.n_total)
nt.assert_is(p.group_label, None)
nt.assert_is(p.value_label, None)
# Test basic vector data in list form
x_1d_list = x_1d_array.tolist()
p.establish_variables(data=x_1d_list)
nt.assert_equal(len(p.plot_data), 1)
nt.assert_equal(len(p.plot_data[0]), self.n_total)
nt.assert_is(p.group_label, None)
nt.assert_is(p.value_label, None)
# Test an object array that looks 1D but isn't
x_notreally_1d = np.array([self.x.ravel(),
self.x.ravel()[:self.n_total / 2]])
p.establish_variables(data=x_notreally_1d)
nt.assert_equal(len(p.plot_data), 2)
nt.assert_equal(len(p.plot_data[0]), self.n_total)
nt.assert_equal(len(p.plot_data[1]), self.n_total / 2)
nt.assert_is(p.group_label, None)
nt.assert_is(p.value_label, None)
def test_2d_input_data(self):
p = cat._CategoricalPlotter()
x = self.x[:, 0]
# Test vector data that looks 2D but doesn't really have columns
p.establish_variables(data=x[:, np.newaxis])
nt.assert_equal(len(p.plot_data), 1)
nt.assert_equal(len(p.plot_data[0]), self.x.shape[0])
nt.assert_is(p.group_label, None)
nt.assert_is(p.value_label, None)
# Test vector data that looks 2D but doesn't really have rows
p.establish_variables(data=x[np.newaxis, :])
nt.assert_equal(len(p.plot_data), 1)
nt.assert_equal(len(p.plot_data[0]), self.x.shape[0])
nt.assert_is(p.group_label, None)
nt.assert_is(p.value_label, None)
def test_3d_input_data(self):
p = cat._CategoricalPlotter()
# Test that passing actually 3D data raises
x = np.zeros((5, 5, 5))
with nt.assert_raises(ValueError):
p.establish_variables(data=x)
def test_list_of_array_input_data(self):
p = cat._CategoricalPlotter()
# Test 2D input in list form
x_list = self.x.T.tolist()
p.establish_variables(data=x_list)
nt.assert_equal(len(p.plot_data), 3)
lengths = [len(v_i) for v_i in p.plot_data]
nt.assert_equal(lengths, [self.n_total / 3] * 3)
nt.assert_is(p.group_label, None)
nt.assert_is(p.value_label, None)
def test_wide_array_input_data(self):
p = cat._CategoricalPlotter()
# Test 2D input in array form
p.establish_variables(data=self.x)
nt.assert_equal(np.shape(p.plot_data), (3, self.n_total / 3))
npt.assert_array_equal(p.plot_data, self.x.T)
nt.assert_is(p.group_label, None)
nt.assert_is(p.value_label, None)
def test_single_long_direct_inputs(self):
p = cat._CategoricalPlotter()
# Test passing a series to the x variable
p.establish_variables(x=self.y)
npt.assert_equal(p.plot_data, [self.y])
nt.assert_equal(p.orient, "h")
nt.assert_equal(p.value_label, "y_data")
nt.assert_is(p.group_label, None)
# Test passing a series to the y variable
p.establish_variables(y=self.y)
npt.assert_equal(p.plot_data, [self.y])
nt.assert_equal(p.orient, "v")
nt.assert_equal(p.value_label, "y_data")
nt.assert_is(p.group_label, None)
# Test passing an array to the y variable
p.establish_variables(y=self.y.values)
npt.assert_equal(p.plot_data, [self.y])
nt.assert_equal(p.orient, "v")
nt.assert_is(p.value_label, None)
nt.assert_is(p.group_label, None)
def test_single_long_indirect_inputs(self):
p = cat._CategoricalPlotter()
# Test referencing a DataFrame series in the x variable
p.establish_variables(x="y", data=self.df)
npt.assert_equal(p.plot_data, [self.y])
nt.assert_equal(p.orient, "h")
nt.assert_equal(p.value_label, "y")
nt.assert_is(p.group_label, None)
# Test referencing a DataFrame series in the y variable
p.establish_variables(y="y", data=self.df)
npt.assert_equal(p.plot_data, [self.y])
nt.assert_equal(p.orient, "v")
nt.assert_equal(p.value_label, "y")
nt.assert_is(p.group_label, None)
def test_longform_groupby(self):
p = cat._CategoricalPlotter()
# Test a vertically oriented grouped and nested plot
p.establish_variables("g", "y", "h", data=self.df)
nt.assert_equal(len(p.plot_data), 3)
nt.assert_equal(len(p.plot_hues), 3)
nt.assert_equal(p.orient, "v")
nt.assert_equal(p.value_label, "y")
nt.assert_equal(p.group_label, "g")
nt.assert_equal(p.hue_title, "h")
for group, vals in zip(["a", "b", "c"], p.plot_data):
npt.assert_array_equal(vals, self.y[self.g == group])
for group, hues in zip(["a", "b", "c"], p.plot_hues):
npt.assert_array_equal(hues, self.h[self.g == group])
# Test a grouped and nested plot with direct array value data
p.establish_variables("g", self.y.values, "h", self.df)
nt.assert_is(p.value_label, None)
nt.assert_equal(p.group_label, "g")
for group, vals in zip(["a", "b", "c"], p.plot_data):
npt.assert_array_equal(vals, self.y[self.g == group])
# Test a grouped and nested plot with direct array hue data
p.establish_variables("g", "y", self.h.values, self.df)
for group, hues in zip(["a", "b", "c"], p.plot_hues):
npt.assert_array_equal(hues, self.h[self.g == group])
# Test categorical grouping data
if pandas_has_categoricals:
df = self.df.copy()
df.g = df.g.astype("category")
# Test that horizontal orientation is automatically detected
p.establish_variables("y", "g", "h", data=df)
nt.assert_equal(len(p.plot_data), 3)
nt.assert_equal(len(p.plot_hues), 3)
nt.assert_equal(p.orient, "h")
nt.assert_equal(p.value_label, "y")
nt.assert_equal(p.group_label, "g")
nt.assert_equal(p.hue_title, "h")
for group, vals in zip(["a", "b", "c"], p.plot_data):
npt.assert_array_equal(vals, self.y[self.g == group])
for group, hues in zip(["a", "b", "c"], p.plot_hues):
npt.assert_array_equal(hues, self.h[self.g == group])
def test_input_validation(self):
p = cat._CategoricalPlotter()
kws = dict(x="g", y="y", hue="h", units="u", data=self.df)
for input in ["x", "y", "hue", "units"]:
input_kws = kws.copy()
input_kws[input] = "bad_input"
with nt.assert_raises(ValueError):
p.establish_variables(**input_kws)
def test_order(self):
p = cat._CategoricalPlotter()
# Test inferred order from a wide dataframe input
p.establish_variables(data=self.x_df)
nt.assert_equal(p.group_names, ["X", "Y", "Z"])
# Test specified order with a wide dataframe input
p.establish_variables(data=self.x_df, order=["Y", "Z", "X"])
nt.assert_equal(p.group_names, ["Y", "Z", "X"])
for group, vals in zip(["Y", "Z", "X"], p.plot_data):
npt.assert_array_equal(vals, self.x_df[group])
with nt.assert_raises(ValueError):
p.establish_variables(data=self.x, order=[1, 2, 0])
# Test inferred order from a grouped longform input
p.establish_variables("g", "y", data=self.df)
nt.assert_equal(p.group_names, ["a", "b", "c"])
# Test specified order from a grouped longform input
p.establish_variables("g", "y", data=self.df, order=["b", "a", "c"])
nt.assert_equal(p.group_names, ["b", "a", "c"])
for group, vals in zip(["b", "a", "c"], p.plot_data):
npt.assert_array_equal(vals, self.y[self.g == group])
# Test inferred order from a grouped input with categorical groups
if pandas_has_categoricals:
df = self.df.copy()
df.g = df.g.astype("category")
df.g = df.g.cat.reorder_categories(["c", "b", "a"])
p.establish_variables("g", "y", data=df)
nt.assert_equal(p.group_names, ["c", "b", "a"])
for group, vals in zip(["c", "b", "a"], p.plot_data):
npt.assert_array_equal(vals, self.y[self.g == group])
df.g = (df.g.cat.add_categories("d")
.cat.reorder_categories(["c", "b", "d", "a"]))
p.establish_variables("g", "y", data=df)
nt.assert_equal(p.group_names, ["c", "b", "d", "a"])
def test_hue_order(self):
p = cat._CategoricalPlotter()
# Test inferred hue order
p.establish_variables("g", "y", "h", data=self.df)
nt.assert_equal(p.hue_names, ["m", "n"])
# Test specified hue order
p.establish_variables("g", "y", "h", data=self.df,
hue_order=["n", "m"])
nt.assert_equal(p.hue_names, ["n", "m"])
# Test inferred hue order from a categorical hue input
if pandas_has_categoricals:
df = self.df.copy()
df.h = df.h.astype("category")
df.h = df.h.cat.reorder_categories(["n", "m"])
p.establish_variables("g", "y", "h", data=df)
nt.assert_equal(p.hue_names, ["n", "m"])
df.h = (df.h.cat.add_categories("o")
.cat.reorder_categories(["o", "m", "n"]))
p.establish_variables("g", "y", "h", data=df)
nt.assert_equal(p.hue_names, ["o", "m", "n"])
def test_plot_units(self):
p = cat._CategoricalPlotter()
p.establish_variables("g", "y", "h", data=self.df)
nt.assert_is(p.plot_units, None)
p.establish_variables("g", "y", "h", data=self.df, units="u")
for group, units in zip(["a", "b", "c"], p.plot_units):
npt.assert_array_equal(units, self.u[self.g == group])
def test_infer_orient(self):
p = cat._CategoricalPlotter()
cats = pd.Series(["a", "b", "c"] * 10)
nums = pd.Series(self.rs.randn(30))
nt.assert_equal(p.infer_orient(cats, nums), "v")
nt.assert_equal(p.infer_orient(nums, cats), "h")
nt.assert_equal(p.infer_orient(nums, None), "h")
nt.assert_equal(p.infer_orient(None, nums), "v")
nt.assert_equal(p.infer_orient(nums, nums, "vert"), "v")
nt.assert_equal(p.infer_orient(nums, nums, "hori"), "h")
with nt.assert_raises(ValueError):
p.infer_orient(cats, cats)
if pandas_has_categoricals:
cats = pd.Series([0, 1, 2] * 10, dtype="category")
nt.assert_equal(p.infer_orient(cats, nums), "v")
nt.assert_equal(p.infer_orient(nums, cats), "h")
with nt.assert_raises(ValueError):
p.infer_orient(cats, cats)
def test_default_palettes(self):
p = cat._CategoricalPlotter()
# Test palette mapping the x position
p.establish_variables("g", "y", data=self.df)
p.establish_colors(None, None, 1)
nt.assert_equal(p.colors, palettes.color_palette("deep", 3))
# Test palette mapping the hue position
p.establish_variables("g", "y", "h", data=self.df)
p.establish_colors(None, None, 1)
nt.assert_equal(p.colors, palettes.color_palette("deep", 2))
def test_default_palette_with_many_levels(self):
with palettes.color_palette(["blue", "red"], 2):
p = cat._CategoricalPlotter()
p.establish_variables("g", "y", data=self.df)
p.establish_colors(None, None, 1)
npt.assert_array_equal(p.colors, palettes.husl_palette(3, l=.7))
def test_specific_color(self):
p = cat._CategoricalPlotter()
# Test the same color for each x position
p.establish_variables("g", "y", data=self.df)
p.establish_colors("blue", None, 1)
blue_rgb = mpl.colors.colorConverter.to_rgb("blue")
nt.assert_equal(p.colors, [blue_rgb] * 3)
# Test a color-based blend for the hue mapping
p.establish_variables("g", "y", "h", data=self.df)
p.establish_colors("#ff0022", None, 1)
rgba_array = np.array(palettes.light_palette("#ff0022", 2))
npt.assert_array_almost_equal(p.colors,
rgba_array[:, :3])
def test_specific_palette(self):
p = cat._CategoricalPlotter()
# Test palette mapping the x position
p.establish_variables("g", "y", data=self.df)
p.establish_colors(None, "dark", 1)
nt.assert_equal(p.colors, palettes.color_palette("dark", 3))
# Test that non-None `color` and `hue` raises an error
p.establish_variables("g", "y", "h", data=self.df)
p.establish_colors(None, "muted", 1)
nt.assert_equal(p.colors, palettes.color_palette("muted", 2))
# Test that specified palette overrides specified color
p = cat._CategoricalPlotter()
p.establish_variables("g", "y", data=self.df)
p.establish_colors("blue", "deep", 1)
nt.assert_equal(p.colors, palettes.color_palette("deep", 3))
def test_dict_as_palette(self):
p = cat._CategoricalPlotter()
p.establish_variables("g", "y", "h", data=self.df)
pal = {"m": (0, 0, 1), "n": (1, 0, 0)}
p.establish_colors(None, pal, 1)
nt.assert_equal(p.colors, [(0, 0, 1), (1, 0, 0)])
def test_palette_desaturation(self):
p = cat._CategoricalPlotter()
p.establish_variables("g", "y", data=self.df)
p.establish_colors((0, 0, 1), None, .5)
nt.assert_equal(p.colors, [(.25, .25, .75)] * 3)
p.establish_colors(None, [(0, 0, 1), (1, 0, 0), "w"], .5)
nt.assert_equal(p.colors, [(.25, .25, .75),
(.75, .25, .25),
(1, 1, 1)])
class TestCategoricalStatPlotter(CategoricalFixture):
    def test_no_bootstrapping(self):
p = cat._CategoricalStatPlotter()
p.establish_variables("g", "y", data=self.df)
p.estimate_statistic(np.mean, None, 100)
npt.assert_array_equal(p.confint, np.array([]))
p.establish_variables("g", "y", "h", data=self.df)
p.estimate_statistic(np.mean, None, 100)
npt.assert_array_equal(p.confint, np.array([[], [], []]))
def test_single_layer_stats(self):
p = cat._CategoricalStatPlotter()
g = pd.Series(np.repeat(list("abc"), 100))
y = pd.Series(np.random.RandomState(0).randn(300))
p.establish_variables(g, y)
p.estimate_statistic(np.mean, 95, 10000)
nt.assert_equal(p.statistic.shape, (3,))
nt.assert_equal(p.confint.shape, (3, 2))
npt.assert_array_almost_equal(p.statistic,
y.groupby(g).mean())
for ci, (_, grp_y) in zip(p.confint, y.groupby(g)):
sem = stats.sem(grp_y)
mean = grp_y.mean()
half_ci = stats.norm.ppf(.975) * sem
ci_want = mean - half_ci, mean + half_ci
npt.assert_array_almost_equal(ci_want, ci, 2)
def test_single_layer_stats_with_units(self):
p = cat._CategoricalStatPlotter()
g = pd.Series(np.repeat(list("abc"), 90))
y = pd.Series(np.random.RandomState(0).randn(270))
u = pd.Series(np.repeat(np.tile(list("xyz"), 30), 3))
y[u == "x"] -= 3
y[u == "y"] += 3
p.establish_variables(g, y)
p.estimate_statistic(np.mean, 95, 10000)
stat1, ci1 = p.statistic, p.confint
p.establish_variables(g, y, units=u)
p.estimate_statistic(np.mean, 95, 10000)
stat2, ci2 = p.statistic, p.confint
npt.assert_array_equal(stat1, stat2)
ci1_size = ci1[:, 1] - ci1[:, 0]
ci2_size = ci2[:, 1] - ci2[:, 0]
npt.assert_array_less(ci1_size, ci2_size)
def test_single_layer_stats_with_missing_data(self):
p = cat._CategoricalStatPlotter()
g = pd.Series(np.repeat(list("abc"), 100))
y = pd.Series(np.random.RandomState(0).randn(300))
p.establish_variables(g, y, order=list("abdc"))
p.estimate_statistic(np.mean, 95, 10000)
nt.assert_equal(p.statistic.shape, (4,))
nt.assert_equal(p.confint.shape, (4, 2))
mean = y[g == "b"].mean()
sem = stats.sem(y[g == "b"])
half_ci = stats.norm.ppf(.975) * sem
ci = mean - half_ci, mean + half_ci
npt.assert_almost_equal(p.statistic[1], mean)
npt.assert_array_almost_equal(p.confint[1], ci, 2)
npt.assert_equal(p.statistic[2], np.nan)
npt.assert_array_equal(p.confint[2], (np.nan, np.nan))
def test_nested_stats(self):
p = cat._CategoricalStatPlotter()
g = pd.Series(np.repeat(list("abc"), 100))
h = pd.Series(np.tile(list("xy"), 150))
y = pd.Series(np.random.RandomState(0).randn(300))
p.establish_variables(g, y, h)
p.estimate_statistic(np.mean, 95, 50000)
nt.assert_equal(p.statistic.shape, (3, 2))
nt.assert_equal(p.confint.shape, (3, 2, 2))
npt.assert_array_almost_equal(p.statistic,
y.groupby([g, h]).mean().unstack())
for ci_g, (_, grp_y) in zip(p.confint, y.groupby(g)):
for ci, hue_y in zip(ci_g, [grp_y[::2], grp_y[1::2]]):
sem = stats.sem(hue_y)
mean = hue_y.mean()
half_ci = stats.norm.ppf(.975) * sem
ci_want = mean - half_ci, mean + half_ci
npt.assert_array_almost_equal(ci_want, ci, 2)
def test_nested_stats_with_units(self):
p = cat._CategoricalStatPlotter()
g = pd.Series(np.repeat(list("abc"), 90))
h = pd.Series(np.tile(list("xy"), 135))
u = pd.Series(np.repeat(list("ijkijk"), 45))
y = pd.Series(np.random.RandomState(0).randn(270))
y[u == "i"] -= 3
y[u == "k"] += 3
p.establish_variables(g, y, h)
p.estimate_statistic(np.mean, 95, 10000)
stat1, ci1 = p.statistic, p.confint
p.establish_variables(g, y, h, units=u)
p.estimate_statistic(np.mean, 95, 10000)
stat2, ci2 = p.statistic, p.confint
npt.assert_array_equal(stat1, stat2)
ci1_size = ci1[:, 0, 1] - ci1[:, 0, 0]
ci2_size = ci2[:, 0, 1] - ci2[:, 0, 0]
npt.assert_array_less(ci1_size, ci2_size)
def test_nested_stats_with_missing_data(self):
p = cat._CategoricalStatPlotter()
g = pd.Series(np.repeat(list("abc"), 100))
y = pd.Series(np.random.RandomState(0).randn(300))
h = pd.Series(np.tile(list("xy"), 150))
p.establish_variables(g, y, h,
order=list("abdc"),
hue_order=list("zyx"))
p.estimate_statistic(np.mean, 95, 50000)
nt.assert_equal(p.statistic.shape, (4, 3))
nt.assert_equal(p.confint.shape, (4, 3, 2))
mean = y[(g == "b") & (h == "x")].mean()
sem = stats.sem(y[(g == "b") & (h == "x")])
half_ci = stats.norm.ppf(.975) * sem
ci = mean - half_ci, mean + half_ci
npt.assert_almost_equal(p.statistic[1, 2], mean)
npt.assert_array_almost_equal(p.confint[1, 2], ci, 2)
npt.assert_array_equal(p.statistic[:, 0], [np.nan] * 4)
npt.assert_array_equal(p.statistic[2], [np.nan] * 3)
npt.assert_array_equal(p.confint[:, 0],
np.zeros((4, 2)) * np.nan)
npt.assert_array_equal(p.confint[2],
np.zeros((3, 2)) * np.nan)
def test_estimator_value_label(self):
p = cat._CategoricalStatPlotter()
p.establish_variables("g", "y", data=self.df)
p.estimate_statistic(np.mean, None, 100)
nt.assert_equal(p.value_label, "mean(y)")
p = cat._CategoricalStatPlotter()
p.establish_variables("g", "y", data=self.df)
p.estimate_statistic(np.median, None, 100)
nt.assert_equal(p.value_label, "median(y)")
def test_draw_cis(self):
p = cat._CategoricalStatPlotter()
# Test vertical CIs
p.orient = "v"
f, ax = plt.subplots()
at_group = [0, 1]
confints = [(.5, 1.5), (.25, .8)]
colors = [".2", ".3"]
p.draw_confints(ax, at_group, confints, colors)
lines = ax.lines
for line, at, ci, c in zip(lines, at_group, confints, colors):
x, y = line.get_xydata().T
npt.assert_array_equal(x, [at, at])
npt.assert_array_equal(y, ci)
nt.assert_equal(line.get_color(), c)
plt.close("all")
# Test horizontal CIs
p.orient = "h"
f, ax = plt.subplots()
p.draw_confints(ax, at_group, confints, colors)
lines = ax.lines
for line, at, ci, c in zip(lines, at_group, confints, colors):
x, y = line.get_xydata().T
npt.assert_array_equal(x, ci)
npt.assert_array_equal(y, [at, at])
nt.assert_equal(line.get_color(), c)
plt.close("all")
# Test extra keyword arguments
f, ax = plt.subplots()
p.draw_confints(ax, at_group, confints, colors, lw=4)
line = ax.lines[0]
nt.assert_equal(line.get_linewidth(), 4)
plt.close("all")
class TestBoxPlotter(CategoricalFixture):
default_kws = dict(x=None, y=None, hue=None, data=None,
order=None, hue_order=None,
orient=None, color=None, palette=None,
saturation=.75, width=.8,
fliersize=5, linewidth=None)
def test_nested_width(self):
p = cat._BoxPlotter(**self.default_kws)
p.establish_variables("g", "y", "h", data=self.df)
nt.assert_equal(p.nested_width, .4 * .98)
kws = self.default_kws.copy()
kws["width"] = .6
p = cat._BoxPlotter(**kws)
p.establish_variables("g", "y", "h", data=self.df)
nt.assert_equal(p.nested_width, .3 * .98)
def test_hue_offsets(self):
p = cat._BoxPlotter(**self.default_kws)
p.establish_variables("g", "y", "h", data=self.df)
npt.assert_array_equal(p.hue_offsets, [-.2, .2])
kws = self.default_kws.copy()
kws["width"] = .6
p = cat._BoxPlotter(**kws)
p.establish_variables("g", "y", "h", data=self.df)
npt.assert_array_equal(p.hue_offsets, [-.15, .15])
p = cat._BoxPlotter(**kws)
p.establish_variables("h", "y", "g", data=self.df)
npt.assert_array_almost_equal(p.hue_offsets, [-.2, 0, .2])
def test_axes_data(self):
ax = cat.boxplot("g", "y", data=self.df)
nt.assert_equal(len(ax.artists), 3)
plt.close("all")
ax = cat.boxplot("g", "y", "h", data=self.df)
nt.assert_equal(len(ax.artists), 6)
plt.close("all")
def test_box_colors(self):
ax = cat.boxplot("g", "y", data=self.df, saturation=1)
pal = palettes.color_palette("deep", 3)
for patch, color in zip(ax.artists, pal):
nt.assert_equal(patch.get_facecolor()[:3], color)
plt.close("all")
ax = cat.boxplot("g", "y", "h", data=self.df, saturation=1)
pal = palettes.color_palette("deep", 2)
for patch, color in zip(ax.artists, pal * 2):
nt.assert_equal(patch.get_facecolor()[:3], color)
plt.close("all")
def test_draw_missing_boxes(self):
ax = cat.boxplot("g", "y", data=self.df,
order=["a", "b", "c", "d"])
nt.assert_equal(len(ax.artists), 3)
def test_missing_data(self):
x = ["a", "a", "b", "b", "c", "c", "d", "d"]
h = ["x", "y", "x", "y", "x", "y", "x", "y"]
y = self.rs.randn(8)
y[-2:] = np.nan
ax = cat.boxplot(x, y)
nt.assert_equal(len(ax.artists), 3)
plt.close("all")
y[-1] = 0
ax = cat.boxplot(x, y, h)
nt.assert_equal(len(ax.artists), 7)
plt.close("all")
def test_boxplots(self):
# Smoke test the high level boxplot options
cat.boxplot("y", data=self.df)
plt.close("all")
cat.boxplot(y="y", data=self.df)
plt.close("all")
cat.boxplot("g", "y", data=self.df)
plt.close("all")
cat.boxplot("y", "g", data=self.df, orient="h")
plt.close("all")
cat.boxplot("g", "y", "h", data=self.df)
plt.close("all")
cat.boxplot("g", "y", "h", order=list("nabc"), data=self.df)
plt.close("all")
cat.boxplot("g", "y", "h", hue_order=list("omn"), data=self.df)
plt.close("all")
cat.boxplot("y", "g", "h", data=self.df, orient="h")
plt.close("all")
def test_axes_annotation(self):
ax = cat.boxplot("g", "y", data=self.df)
nt.assert_equal(ax.get_xlabel(), "g")
nt.assert_equal(ax.get_ylabel(), "y")
nt.assert_equal(ax.get_xlim(), (-.5, 2.5))
npt.assert_array_equal(ax.get_xticks(), [0, 1, 2])
npt.assert_array_equal([l.get_text() for l in ax.get_xticklabels()],
["a", "b", "c"])
plt.close("all")
ax = cat.boxplot("g", "y", "h", data=self.df)
nt.assert_equal(ax.get_xlabel(), "g")
nt.assert_equal(ax.get_ylabel(), "y")
npt.assert_array_equal(ax.get_xticks(), [0, 1, 2])
npt.assert_array_equal([l.get_text() for l in ax.get_xticklabels()],
["a", "b", "c"])
npt.assert_array_equal([l.get_text() for l in ax.legend_.get_texts()],
["m", "n"])
plt.close("all")
ax = cat.boxplot("y", "g", data=self.df, orient="h")
nt.assert_equal(ax.get_xlabel(), "y")
nt.assert_equal(ax.get_ylabel(), "g")
nt.assert_equal(ax.get_ylim(), (2.5, -.5))
npt.assert_array_equal(ax.get_yticks(), [0, 1, 2])
npt.assert_array_equal([l.get_text() for l in ax.get_yticklabels()],
["a", "b", "c"])
plt.close("all")
class TestViolinPlotter(CategoricalFixture):
default_kws = dict(x=None, y=None, hue=None, data=None,
order=None, hue_order=None,
bw="scott", cut=2, scale="area", scale_hue=True,
gridsize=100, width=.8, inner="box", split=False,
orient=None, linewidth=None,
color=None, palette=None, saturation=.75)
def test_split_error(self):
kws = self.default_kws.copy()
kws.update(dict(x="h", y="y", hue="g", data=self.df, split=True))
with nt.assert_raises(ValueError):
cat._ViolinPlotter(**kws)
def test_no_observations(self):
p = cat._ViolinPlotter(**self.default_kws)
x = ["a", "a", "b"]
y = self.rs.randn(3)
y[-1] = np.nan
p.establish_variables(x, y)
p.estimate_densities("scott", 2, "area", True, 20)
nt.assert_equal(len(p.support[0]), 20)
nt.assert_equal(len(p.support[1]), 0)
nt.assert_equal(len(p.density[0]), 20)
nt.assert_equal(len(p.density[1]), 1)
nt.assert_equal(p.density[1].item(), 1)
p.estimate_densities("scott", 2, "count", True, 20)
nt.assert_equal(p.density[1].item(), 0)
x = ["a"] * 4 + ["b"] * 2
y = self.rs.randn(6)
h = ["m", "n"] * 2 + ["m"] * 2
p.establish_variables(x, y, h)
p.estimate_densities("scott", 2, "area", True, 20)
nt.assert_equal(len(p.support[1][0]), 20)
nt.assert_equal(len(p.support[1][1]), 0)
nt.assert_equal(len(p.density[1][0]), 20)
nt.assert_equal(len(p.density[1][1]), 1)
nt.assert_equal(p.density[1][1].item(), 1)
p.estimate_densities("scott", 2, "count", False, 20)
nt.assert_equal(p.density[1][1].item(), 0)
def test_single_observation(self):
p = cat._ViolinPlotter(**self.default_kws)
x = ["a", "a", "b"]
y = self.rs.randn(3)
p.establish_variables(x, y)
p.estimate_densities("scott", 2, "area", True, 20)
nt.assert_equal(len(p.support[0]), 20)
nt.assert_equal(len(p.support[1]), 1)
nt.assert_equal(len(p.density[0]), 20)
nt.assert_equal(len(p.density[1]), 1)
nt.assert_equal(p.density[1].item(), 1)
p.estimate_densities("scott", 2, "count", True, 20)
nt.assert_equal(p.density[1].item(), .5)
x = ["b"] * 4 + ["a"] * 3
y = self.rs.randn(7)
h = (["m", "n"] * 4)[:-1]
p.establish_variables(x, y, h)
p.estimate_densities("scott", 2, "area", True, 20)
nt.assert_equal(len(p.support[1][0]), 20)
nt.assert_equal(len(p.support[1][1]), 1)
nt.assert_equal(len(p.density[1][0]), 20)
nt.assert_equal(len(p.density[1][1]), 1)
nt.assert_equal(p.density[1][1].item(), 1)
p.estimate_densities("scott", 2, "count", False, 20)
nt.assert_equal(p.density[1][1].item(), .5)
def test_dwidth(self):
kws = self.default_kws.copy()
kws.update(dict(x="g", y="y", data=self.df))
p = cat._ViolinPlotter(**kws)
nt.assert_equal(p.dwidth, .4)
kws.update(dict(width=.4))
p = cat._ViolinPlotter(**kws)
nt.assert_equal(p.dwidth, .2)
kws.update(dict(hue="h", width=.8))
p = cat._ViolinPlotter(**kws)
nt.assert_equal(p.dwidth, .2)
kws.update(dict(split=True))
p = cat._ViolinPlotter(**kws)
nt.assert_equal(p.dwidth, .4)
def test_scale_area(self):
kws = self.default_kws.copy()
kws["scale"] = "area"
p = cat._ViolinPlotter(**kws)
# Test single layer of grouping
p.hue_names = None
density = [self.rs.uniform(0, .8, 50), self.rs.uniform(0, .2, 50)]
max_before = np.array([d.max() for d in density])
p.scale_area(density, max_before, False)
max_after = np.array([d.max() for d in density])
nt.assert_equal(max_after[0], 1)
before_ratio = max_before[1] / max_before[0]
after_ratio = max_after[1] / max_after[0]
nt.assert_equal(before_ratio, after_ratio)
# Test nested grouping scaling across all densities
p.hue_names = ["foo", "bar"]
density = [[self.rs.uniform(0, .8, 50), self.rs.uniform(0, .2, 50)],
[self.rs.uniform(0, .1, 50), self.rs.uniform(0, .02, 50)]]
max_before = np.array([[r.max() for r in row] for row in density])
p.scale_area(density, max_before, False)
max_after = np.array([[r.max() for r in row] for row in density])
nt.assert_equal(max_after[0, 0], 1)
before_ratio = max_before[1, 1] / max_before[0, 0]
after_ratio = max_after[1, 1] / max_after[0, 0]
nt.assert_equal(before_ratio, after_ratio)
# Test nested grouping scaling within hue
p.hue_names = ["foo", "bar"]
density = [[self.rs.uniform(0, .8, 50), self.rs.uniform(0, .2, 50)],
[self.rs.uniform(0, .1, 50), self.rs.uniform(0, .02, 50)]]
max_before = np.array([[r.max() for r in row] for row in density])
p.scale_area(density, max_before, True)
max_after = np.array([[r.max() for r in row] for row in density])
nt.assert_equal(max_after[0, 0], 1)
nt.assert_equal(max_after[1, 0], 1)
before_ratio = max_before[1, 1] / max_before[1, 0]
after_ratio = max_after[1, 1] / max_after[1, 0]
nt.assert_equal(before_ratio, after_ratio)
def test_scale_width(self):
kws = self.default_kws.copy()
kws["scale"] = "width"
p = cat._ViolinPlotter(**kws)
# Test single layer of grouping
p.hue_names = None
density = [self.rs.uniform(0, .8, 50), self.rs.uniform(0, .2, 50)]
p.scale_width(density)
max_after = np.array([d.max() for d in density])
npt.assert_array_equal(max_after, [1, 1])
# Test nested grouping
p.hue_names = ["foo", "bar"]
density = [[self.rs.uniform(0, .8, 50), self.rs.uniform(0, .2, 50)],
[self.rs.uniform(0, .1, 50), self.rs.uniform(0, .02, 50)]]
p.scale_width(density)
max_after = np.array([[r.max() for r in row] for row in density])
npt.assert_array_equal(max_after, [[1, 1], [1, 1]])
def test_scale_count(self):
kws = self.default_kws.copy()
kws["scale"] = "count"
p = cat._ViolinPlotter(**kws)
# Test single layer of grouping
p.hue_names = None
density = [self.rs.uniform(0, .8, 20), self.rs.uniform(0, .2, 40)]
counts = np.array([20, 40])
p.scale_count(density, counts, False)
max_after = np.array([d.max() for d in density])
npt.assert_array_equal(max_after, [.5, 1])
# Test nested grouping scaling across all densities
p.hue_names = ["foo", "bar"]
density = [[self.rs.uniform(0, .8, 5), self.rs.uniform(0, .2, 40)],
[self.rs.uniform(0, .1, 100), self.rs.uniform(0, .02, 50)]]
counts = np.array([[5, 40], [100, 50]])
p.scale_count(density, counts, False)
max_after = np.array([[r.max() for r in row] for row in density])
npt.assert_array_equal(max_after, [[.05, .4], [1, .5]])
# Test nested grouping scaling within hue
p.hue_names = ["foo", "bar"]
density = [[self.rs.uniform(0, .8, 5), self.rs.uniform(0, .2, 40)],
[self.rs.uniform(0, .1, 100), self.rs.uniform(0, .02, 50)]]
counts = np.array([[5, 40], [100, 50]])
p.scale_count(density, counts, True)
max_after = np.array([[r.max() for r in row] for row in density])
npt.assert_array_equal(max_after, [[.125, 1], [1, .5]])
def test_bad_scale(self):
kws = self.default_kws.copy()
kws["scale"] = "not_a_scale_type"
with nt.assert_raises(ValueError):
cat._ViolinPlotter(**kws)
def test_kde_fit(self):
p = cat._ViolinPlotter(**self.default_kws)
data = self.y
data_std = data.std(ddof=1)
# Bandwidth behavior depends on scipy version
if LooseVersion(scipy.__version__) < "0.11":
# Test ignoring custom bandwidth on old scipy
kde, bw = p.fit_kde(self.y, .2)
nt.assert_is_instance(kde, stats.gaussian_kde)
nt.assert_equal(kde.factor, kde.scotts_factor())
else:
# Test reference rule bandwidth
kde, bw = p.fit_kde(data, "scott")
nt.assert_is_instance(kde, stats.gaussian_kde)
nt.assert_equal(kde.factor, kde.scotts_factor())
nt.assert_equal(bw, kde.scotts_factor() * data_std)
# Test numeric scale factor
kde, bw = p.fit_kde(self.y, .2)
nt.assert_is_instance(kde, stats.gaussian_kde)
nt.assert_equal(kde.factor, .2)
nt.assert_equal(bw, .2 * data_std)
def test_draw_to_density(self):
p = cat._ViolinPlotter(**self.default_kws)
# p.dwidth will be 1 for easier testing
p.width = 2
        # Test vertical plots
support = np.array([.2, .6])
density = np.array([.1, .4])
# Test full vertical plot
_, ax = plt.subplots()
p.draw_to_density(ax, 0, .5, support, density, False)
x, y = ax.lines[0].get_xydata().T
npt.assert_array_equal(x, [.99 * -.4, .99 * .4])
npt.assert_array_equal(y, [.5, .5])
plt.close("all")
# Test left vertical plot
_, ax = plt.subplots()
p.draw_to_density(ax, 0, .5, support, density, "left")
x, y = ax.lines[0].get_xydata().T
npt.assert_array_equal(x, [.99 * -.4, 0])
npt.assert_array_equal(y, [.5, .5])
plt.close("all")
# Test right vertical plot
_, ax = plt.subplots()
p.draw_to_density(ax, 0, .5, support, density, "right")
x, y = ax.lines[0].get_xydata().T
npt.assert_array_equal(x, [0, .99 * .4])
npt.assert_array_equal(y, [.5, .5])
plt.close("all")
# Switch orientation to test horizontal plots
p.orient = "h"
support = np.array([.2, .5])
density = np.array([.3, .7])
# Test full horizontal plot
_, ax = plt.subplots()
p.draw_to_density(ax, 0, .6, support, density, False)
x, y = ax.lines[0].get_xydata().T
npt.assert_array_equal(x, [.6, .6])
npt.assert_array_equal(y, [.99 * -.7, .99 * .7])
plt.close("all")
# Test left horizontal plot
_, ax = plt.subplots()
p.draw_to_density(ax, 0, .6, support, density, "left")
x, y = ax.lines[0].get_xydata().T
npt.assert_array_equal(x, [.6, .6])
npt.assert_array_equal(y, [.99 * -.7, 0])
plt.close("all")
# Test right horizontal plot
_, ax = plt.subplots()
p.draw_to_density(ax, 0, .6, support, density, "right")
x, y = ax.lines[0].get_xydata().T
npt.assert_array_equal(x, [.6, .6])
npt.assert_array_equal(y, [0, .99 * .7])
plt.close("all")
def test_draw_single_observations(self):
p = cat._ViolinPlotter(**self.default_kws)
p.width = 2
# Test vertical plot
_, ax = plt.subplots()
p.draw_single_observation(ax, 1, 1.5, 1)
x, y = ax.lines[0].get_xydata().T
npt.assert_array_equal(x, [0, 2])
npt.assert_array_equal(y, [1.5, 1.5])
plt.close("all")
# Test horizontal plot
p.orient = "h"
_, ax = plt.subplots()
p.draw_single_observation(ax, 2, 2.2, .5)
x, y = ax.lines[0].get_xydata().T
npt.assert_array_equal(x, [2.2, 2.2])
npt.assert_array_equal(y, [1.5, 2.5])
plt.close("all")
def test_draw_box_lines(self):
# Test vertical plot
kws = self.default_kws.copy()
kws.update(dict(y="y", data=self.df, inner=None))
p = cat._ViolinPlotter(**kws)
_, ax = plt.subplots()
p.draw_box_lines(ax, self.y, p.support[0], p.density[0], 0)
nt.assert_equal(len(ax.lines), 2)
q25, q50, q75 = np.percentile(self.y, [25, 50, 75])
_, y = ax.lines[1].get_xydata().T
npt.assert_array_equal(y, [q25, q75])
_, y = ax.collections[0].get_offsets().T
nt.assert_equal(y, q50)
plt.close("all")
# Test horizontal plot
kws = self.default_kws.copy()
kws.update(dict(x="y", data=self.df, inner=None))
p = cat._ViolinPlotter(**kws)
_, ax = plt.subplots()
p.draw_box_lines(ax, self.y, p.support[0], p.density[0], 0)
nt.assert_equal(len(ax.lines), 2)
q25, q50, q75 = np.percentile(self.y, [25, 50, 75])
x, _ = ax.lines[1].get_xydata().T
npt.assert_array_equal(x, [q25, q75])
x, _ = ax.collections[0].get_offsets().T
nt.assert_equal(x, q50)
plt.close("all")
def test_draw_quartiles(self):
kws = self.default_kws.copy()
kws.update(dict(y="y", data=self.df, inner=None))
p = cat._ViolinPlotter(**kws)
_, ax = plt.subplots()
p.draw_quartiles(ax, self.y, p.support[0], p.density[0], 0)
for val, line in zip(np.percentile(self.y, [25, 50, 75]), ax.lines):
_, y = line.get_xydata().T
npt.assert_array_equal(y, [val, val])
def test_draw_points(self):
p = cat._ViolinPlotter(**self.default_kws)
# Test vertical plot
_, ax = plt.subplots()
p.draw_points(ax, self.y, 0)
x, y = ax.collections[0].get_offsets().T
npt.assert_array_equal(x, np.zeros_like(self.y))
npt.assert_array_equal(y, self.y)
plt.close("all")
# Test horizontal plot
p.orient = "h"
_, ax = plt.subplots()
p.draw_points(ax, self.y, 0)
x, y = ax.collections[0].get_offsets().T
npt.assert_array_equal(x, self.y)
npt.assert_array_equal(y, np.zeros_like(self.y))
plt.close("all")
def test_draw_sticks(self):
kws = self.default_kws.copy()
kws.update(dict(y="y", data=self.df, inner=None))
p = cat._ViolinPlotter(**kws)
# Test vertical plot
_, ax = plt.subplots()
p.draw_stick_lines(ax, self.y, p.support[0], p.density[0], 0)
for val, line in zip(self.y, ax.lines):
_, y = line.get_xydata().T
npt.assert_array_equal(y, [val, val])
plt.close("all")
# Test horizontal plot
p.orient = "h"
_, ax = plt.subplots()
p.draw_stick_lines(ax, self.y, p.support[0], p.density[0], 0)
for val, line in zip(self.y, ax.lines):
x, _ = line.get_xydata().T
npt.assert_array_equal(x, [val, val])
plt.close("all")
def test_validate_inner(self):
kws = self.default_kws.copy()
kws.update(dict(inner="bad_inner"))
with nt.assert_raises(ValueError):
cat._ViolinPlotter(**kws)
def test_draw_violinplots(self):
kws = self.default_kws.copy()
# Test single vertical violin
kws.update(dict(y="y", data=self.df, inner=None,
saturation=1, color=(1, 0, 0, 1)))
p = cat._ViolinPlotter(**kws)
_, ax = plt.subplots()
p.draw_violins(ax)
nt.assert_equal(len(ax.collections), 1)
npt.assert_array_equal(ax.collections[0].get_facecolors(),
[(1, 0, 0, 1)])
plt.close("all")
# Test single horizontal violin
kws.update(dict(x="y", y=None, color=(0, 1, 0, 1)))
p = cat._ViolinPlotter(**kws)
_, ax = plt.subplots()
p.draw_violins(ax)
nt.assert_equal(len(ax.collections), 1)
npt.assert_array_equal(ax.collections[0].get_facecolors(),
[(0, 1, 0, 1)])
plt.close("all")
# Test multiple vertical violins
kws.update(dict(x="g", y="y", color=None,))
p = cat._ViolinPlotter(**kws)
_, ax = plt.subplots()
p.draw_violins(ax)
nt.assert_equal(len(ax.collections), 3)
for violin, color in zip(ax.collections, palettes.color_palette()):
npt.assert_array_equal(violin.get_facecolors()[0, :-1], color)
plt.close("all")
# Test multiple violins with hue nesting
kws.update(dict(hue="h"))
p = cat._ViolinPlotter(**kws)
_, ax = plt.subplots()
p.draw_violins(ax)
nt.assert_equal(len(ax.collections), 6)
for violin, color in zip(ax.collections,
palettes.color_palette(n_colors=2) * 3):
npt.assert_array_equal(violin.get_facecolors()[0, :-1], color)
plt.close("all")
# Test multiple split violins
kws.update(dict(split=True, palette="muted"))
p = cat._ViolinPlotter(**kws)
_, ax = plt.subplots()
p.draw_violins(ax)
nt.assert_equal(len(ax.collections), 6)
for violin, color in zip(ax.collections,
palettes.color_palette("muted",
n_colors=2) * 3):
npt.assert_array_equal(violin.get_facecolors()[0, :-1], color)
plt.close("all")
def test_draw_violinplots_no_observations(self):
kws = self.default_kws.copy()
kws["inner"] = None
# Test single layer of grouping
x = ["a", "a", "b"]
y = self.rs.randn(3)
y[-1] = np.nan
kws.update(x=x, y=y)
p = cat._ViolinPlotter(**kws)
_, ax = plt.subplots()
p.draw_violins(ax)
nt.assert_equal(len(ax.collections), 1)
nt.assert_equal(len(ax.lines), 0)
plt.close("all")
# Test nested hue grouping
x = ["a"] * 4 + ["b"] * 2
y = self.rs.randn(6)
h = ["m", "n"] * 2 + ["m"] * 2
kws.update(x=x, y=y, hue=h)
p = cat._ViolinPlotter(**kws)
_, ax = plt.subplots()
p.draw_violins(ax)
nt.assert_equal(len(ax.collections), 3)
nt.assert_equal(len(ax.lines), 0)
plt.close("all")
def test_draw_violinplots_single_observations(self):
kws = self.default_kws.copy()
kws["inner"] = None
# Test single layer of grouping
x = ["a", "a", "b"]
y = self.rs.randn(3)
kws.update(x=x, y=y)
p = cat._ViolinPlotter(**kws)
_, ax = plt.subplots()
p.draw_violins(ax)
nt.assert_equal(len(ax.collections), 1)
nt.assert_equal(len(ax.lines), 1)
plt.close("all")
# Test nested hue grouping
x = ["b"] * 4 + ["a"] * 3
y = self.rs.randn(7)
h = (["m", "n"] * 4)[:-1]
kws.update(x=x, y=y, hue=h)
p = cat._ViolinPlotter(**kws)
_, ax = plt.subplots()
p.draw_violins(ax)
nt.assert_equal(len(ax.collections), 3)
nt.assert_equal(len(ax.lines), 1)
plt.close("all")
# Test nested hue grouping with split
kws["split"] = True
p = cat._ViolinPlotter(**kws)
_, ax = plt.subplots()
p.draw_violins(ax)
nt.assert_equal(len(ax.collections), 3)
nt.assert_equal(len(ax.lines), 1)
plt.close("all")
def test_violinplots(self):
# Smoke test the high level violinplot options
cat.violinplot("y", data=self.df)
plt.close("all")
cat.violinplot(y="y", data=self.df)
plt.close("all")
cat.violinplot("g", "y", data=self.df)
plt.close("all")
cat.violinplot("y", "g", data=self.df, orient="h")
plt.close("all")
cat.violinplot("g", "y", "h", data=self.df)
plt.close("all")
cat.violinplot("g", "y", "h", order=list("nabc"), data=self.df)
plt.close("all")
cat.violinplot("g", "y", "h", hue_order=list("omn"), data=self.df)
plt.close("all")
cat.violinplot("y", "g", "h", data=self.df, orient="h")
plt.close("all")
for inner in ["box", "quart", "point", "stick", None]:
cat.violinplot("g", "y", data=self.df, inner=inner)
plt.close("all")
cat.violinplot("g", "y", "h", data=self.df, inner=inner)
plt.close("all")
cat.violinplot("g", "y", "h", data=self.df,
inner=inner, split=True)
plt.close("all")
class TestStripPlotter(CategoricalFixture):
def test_stripplot_vertical(self):
pal = palettes.color_palette()
ax = cat.stripplot("g", "y", data=self.df)
for i, (_, vals) in enumerate(self.y.groupby(self.g)):
x, y = ax.collections[i].get_offsets().T
npt.assert_array_equal(x, np.ones(len(x)) * i)
npt.assert_array_equal(y, vals)
npt.assert_equal(ax.collections[i].get_facecolors()[0, :3], pal[i])
@skipif(not pandas_has_categoricals)
    def test_stripplot_horizontal(self):
df = self.df.copy()
df.g = df.g.astype("category")
ax = cat.stripplot("y", "g", data=df)
for i, (_, vals) in enumerate(self.y.groupby(self.g)):
x, y = ax.collections[i].get_offsets().T
npt.assert_array_equal(x, vals)
npt.assert_array_equal(y, np.ones(len(x)) * i)
def test_stripplot_jitter(self):
pal = palettes.color_palette()
ax = cat.stripplot("g", "y", data=self.df, jitter=True)
for i, (_, vals) in enumerate(self.y.groupby(self.g)):
x, y = ax.collections[i].get_offsets().T
npt.assert_array_less(np.ones(len(x)) * i - .1, x)
npt.assert_array_less(x, np.ones(len(x)) * i + .1)
npt.assert_array_equal(y, vals)
npt.assert_equal(ax.collections[i].get_facecolors()[0, :3], pal[i])
def test_split_nested_stripplot_vertical(self):
pal = palettes.color_palette()
ax = cat.stripplot("g", "y", "h", data=self.df)
for i, (_, group_vals) in enumerate(self.y.groupby(self.g)):
for j, (_, vals) in enumerate(group_vals.groupby(self.h)):
x, y = ax.collections[i * 2 + j].get_offsets().T
npt.assert_array_equal(x, np.ones(len(x)) * i + [-.2, .2][j])
npt.assert_array_equal(y, vals)
fc = ax.collections[i * 2 + j].get_facecolors()[0, :3]
npt.assert_equal(fc, pal[j])
@skipif(not pandas_has_categoricals)
def test_split_nested_stripplot_horizontal(self):
df = self.df.copy()
df.g = df.g.astype("category")
ax = cat.stripplot("y", "g", "h", data=df)
for i, (_, group_vals) in enumerate(self.y.groupby(self.g)):
for j, (_, vals) in enumerate(group_vals.groupby(self.h)):
x, y = ax.collections[i * 2 + j].get_offsets().T
npt.assert_array_equal(x, vals)
npt.assert_array_equal(y, np.ones(len(x)) * i + [-.2, .2][j])
def test_unsplit_nested_stripplot_vertical(self):
pal = palettes.color_palette()
# Test a simple vertical strip plot
ax = cat.stripplot("g", "y", "h", data=self.df, split=False)
for i, (_, group_vals) in enumerate(self.y.groupby(self.g)):
for j, (_, vals) in enumerate(group_vals.groupby(self.h)):
x, y = ax.collections[i * 2 + j].get_offsets().T
npt.assert_array_equal(x, np.ones(len(x)) * i)
npt.assert_array_equal(y, vals)
fc = ax.collections[i * 2 + j].get_facecolors()[0, :3]
npt.assert_equal(fc, pal[j])
@skipif(not pandas_has_categoricals)
def test_unsplit_nested_stripplot_horizontal(self):
df = self.df.copy()
df.g = df.g.astype("category")
ax = cat.stripplot("y", "g", "h", data=df, split=False)
for i, (_, group_vals) in enumerate(self.y.groupby(self.g)):
for j, (_, vals) in enumerate(group_vals.groupby(self.h)):
x, y = ax.collections[i * 2 + j].get_offsets().T
npt.assert_array_equal(x, vals)
npt.assert_array_equal(y, np.ones(len(x)) * i)
class TestBarPlotter(CategoricalFixture):
default_kws = dict(x=None, y=None, hue=None, data=None,
estimator=np.mean, ci=95, n_boot=100, units=None,
order=None, hue_order=None,
orient=None, color=None, palette=None,
saturation=.75, errcolor=".26")
def test_nested_width(self):
kws = self.default_kws.copy()
p = cat._BarPlotter(**kws)
p.establish_variables("g", "y", "h", data=self.df)
nt.assert_equal(p.nested_width, .8 / 2)
p = cat._BarPlotter(**kws)
p.establish_variables("h", "y", "g", data=self.df)
nt.assert_equal(p.nested_width, .8 / 3)
def test_draw_vertical_bars(self):
kws = self.default_kws.copy()
kws.update(x="g", y="y", data=self.df)
p = cat._BarPlotter(**kws)
f, ax = plt.subplots()
p.draw_bars(ax, {})
nt.assert_equal(len(ax.patches), len(p.plot_data))
nt.assert_equal(len(ax.lines), len(p.plot_data))
for bar, color in zip(ax.patches, p.colors):
nt.assert_equal(bar.get_facecolor()[:-1], color)
positions = np.arange(len(p.plot_data)) - p.width / 2
for bar, pos, stat in zip(ax.patches, positions, p.statistic):
nt.assert_equal(bar.get_x(), pos)
nt.assert_equal(bar.get_y(), min(0, stat))
nt.assert_equal(bar.get_height(), abs(stat))
nt.assert_equal(bar.get_width(), p.width)
def test_draw_horizontal_bars(self):
kws = self.default_kws.copy()
kws.update(x="y", y="g", orient="h", data=self.df)
p = cat._BarPlotter(**kws)
f, ax = plt.subplots()
p.draw_bars(ax, {})
nt.assert_equal(len(ax.patches), len(p.plot_data))
nt.assert_equal(len(ax.lines), len(p.plot_data))
for bar, color in zip(ax.patches, p.colors):
nt.assert_equal(bar.get_facecolor()[:-1], color)
positions = np.arange(len(p.plot_data)) - p.width / 2
for bar, pos, stat in zip(ax.patches, positions, p.statistic):
nt.assert_equal(bar.get_x(), min(0, stat))
nt.assert_equal(bar.get_y(), pos)
nt.assert_equal(bar.get_height(), p.width)
nt.assert_equal(bar.get_width(), abs(stat))
def test_draw_nested_vertical_bars(self):
kws = self.default_kws.copy()
kws.update(x="g", y="y", hue="h", data=self.df)
p = cat._BarPlotter(**kws)
f, ax = plt.subplots()
p.draw_bars(ax, {})
n_groups, n_hues = len(p.plot_data), len(p.hue_names)
nt.assert_equal(len(ax.patches), n_groups * n_hues)
nt.assert_equal(len(ax.lines), n_groups * n_hues)
for bar in ax.patches[:n_groups]:
nt.assert_equal(bar.get_facecolor()[:-1], p.colors[0])
for bar in ax.patches[n_groups:]:
nt.assert_equal(bar.get_facecolor()[:-1], p.colors[1])
for bar, stat in zip(ax.patches, p.statistic.T.flat):
nt.assert_almost_equal(bar.get_y(), min(0, stat))
nt.assert_almost_equal(bar.get_height(), abs(stat))
positions = np.arange(len(p.plot_data))
for bar, pos in zip(ax.patches[:n_groups], positions):
nt.assert_almost_equal(bar.get_x(), pos - p.width / 2)
nt.assert_almost_equal(bar.get_width(), p.nested_width)
def test_draw_nested_horizontal_bars(self):
kws = self.default_kws.copy()
kws.update(x="y", y="g", hue="h", orient="h", data=self.df)
p = cat._BarPlotter(**kws)
f, ax = plt.subplots()
p.draw_bars(ax, {})
n_groups, n_hues = len(p.plot_data), len(p.hue_names)
nt.assert_equal(len(ax.patches), n_groups * n_hues)
nt.assert_equal(len(ax.lines), n_groups * n_hues)
for bar in ax.patches[:n_groups]:
nt.assert_equal(bar.get_facecolor()[:-1], p.colors[0])
for bar in ax.patches[n_groups:]:
nt.assert_equal(bar.get_facecolor()[:-1], p.colors[1])
positions = np.arange(len(p.plot_data))
for bar, pos in zip(ax.patches[:n_groups], positions):
nt.assert_almost_equal(bar.get_y(), pos - p.width / 2)
nt.assert_almost_equal(bar.get_height(), p.nested_width)
for bar, stat in zip(ax.patches, p.statistic.T.flat):
nt.assert_almost_equal(bar.get_x(), min(0, stat))
nt.assert_almost_equal(bar.get_width(), abs(stat))
def test_draw_missing_bars(self):
kws = self.default_kws.copy()
order = list("abcd")
kws.update(x="g", y="y", order=order, data=self.df)
p = cat._BarPlotter(**kws)
f, ax = plt.subplots()
p.draw_bars(ax, {})
nt.assert_equal(len(ax.patches), len(order))
nt.assert_equal(len(ax.lines), len(order))
plt.close("all")
hue_order = list("mno")
kws.update(x="g", y="y", hue="h", hue_order=hue_order, data=self.df)
p = cat._BarPlotter(**kws)
f, ax = plt.subplots()
p.draw_bars(ax, {})
nt.assert_equal(len(ax.patches), len(p.plot_data) * len(hue_order))
nt.assert_equal(len(ax.lines), len(p.plot_data) * len(hue_order))
plt.close("all")
def test_barplot_colors(self):
# Test unnested palette colors
kws = self.default_kws.copy()
kws.update(x="g", y="y", data=self.df,
saturation=1, palette="muted")
p = cat._BarPlotter(**kws)
f, ax = plt.subplots()
p.draw_bars(ax, {})
palette = palettes.color_palette("muted", len(self.g.unique()))
for patch, pal_color in zip(ax.patches, palette):
nt.assert_equal(patch.get_facecolor()[:-1], pal_color)
plt.close("all")
# Test single color
color = (.2, .2, .3, 1)
kws = self.default_kws.copy()
kws.update(x="g", y="y", data=self.df,
saturation=1, color=color)
p = cat._BarPlotter(**kws)
f, ax = plt.subplots()
p.draw_bars(ax, {})
for patch in ax.patches:
nt.assert_equal(patch.get_facecolor(), color)
plt.close("all")
# Test nested palette colors
kws = self.default_kws.copy()
kws.update(x="g", y="y", hue="h", data=self.df,
saturation=1, palette="Set2")
p = cat._BarPlotter(**kws)
f, ax = plt.subplots()
p.draw_bars(ax, {})
palette = palettes.color_palette("Set2", len(self.h.unique()))
for patch in ax.patches[:len(self.g.unique())]:
nt.assert_equal(patch.get_facecolor()[:-1], palette[0])
for patch in ax.patches[len(self.g.unique()):]:
nt.assert_equal(patch.get_facecolor()[:-1], palette[1])
plt.close("all")
def test_simple_barplots(self):
ax = cat.barplot("g", "y", data=self.df)
nt.assert_equal(len(ax.patches), len(self.g.unique()))
nt.assert_equal(ax.get_xlabel(), "g")
nt.assert_equal(ax.get_ylabel(), "mean(y)")
plt.close("all")
ax = cat.barplot("y", "g", orient="h", data=self.df)
nt.assert_equal(len(ax.patches), len(self.g.unique()))
nt.assert_equal(ax.get_xlabel(), "mean(y)")
nt.assert_equal(ax.get_ylabel(), "g")
plt.close("all")
ax = cat.barplot("g", "y", "h", data=self.df)
nt.assert_equal(len(ax.patches),
len(self.g.unique()) * len(self.h.unique()))
nt.assert_equal(ax.get_xlabel(), "g")
nt.assert_equal(ax.get_ylabel(), "mean(y)")
plt.close("all")
ax = cat.barplot("y", "g", "h", orient="h", data=self.df)
nt.assert_equal(len(ax.patches),
len(self.g.unique()) * len(self.h.unique()))
nt.assert_equal(ax.get_xlabel(), "mean(y)")
nt.assert_equal(ax.get_ylabel(), "g")
plt.close("all")
class TestPointPlotter(CategoricalFixture):
default_kws = dict(x=None, y=None, hue=None, data=None,
estimator=np.mean, ci=95, n_boot=100, units=None,
order=None, hue_order=None,
markers="o", linestyles="-", dodge=0,
join=True, scale=1,
orient=None, color=None, palette=None)
    def test_different_default_colors(self):
kws = self.default_kws.copy()
kws.update(dict(x="g", y="y", data=self.df))
p = cat._PointPlotter(**kws)
color = palettes.color_palette()[0]
npt.assert_array_equal(p.colors, [color, color, color])
def test_hue_offsets(self):
kws = self.default_kws.copy()
kws.update(dict(x="g", y="y", hue="h", data=self.df))
p = cat._PointPlotter(**kws)
npt.assert_array_equal(p.hue_offsets, [0, 0])
kws.update(dict(dodge=.5))
p = cat._PointPlotter(**kws)
npt.assert_array_equal(p.hue_offsets, [-.25, .25])
kws.update(dict(x="h", hue="g", dodge=0))
p = cat._PointPlotter(**kws)
npt.assert_array_equal(p.hue_offsets, [0, 0, 0])
kws.update(dict(dodge=.3))
p = cat._PointPlotter(**kws)
npt.assert_array_equal(p.hue_offsets, [-.15, 0, .15])
def test_draw_vertical_points(self):
kws = self.default_kws.copy()
kws.update(x="g", y="y", data=self.df)
p = cat._PointPlotter(**kws)
f, ax = plt.subplots()
p.draw_points(ax)
nt.assert_equal(len(ax.collections), 1)
nt.assert_equal(len(ax.lines), len(p.plot_data) + 1)
points = ax.collections[0]
nt.assert_equal(len(points.get_offsets()), len(p.plot_data))
x, y = points.get_offsets().T
npt.assert_array_equal(x, np.arange(len(p.plot_data)))
npt.assert_array_equal(y, p.statistic)
for got_color, want_color in zip(points.get_facecolors(),
p.colors):
npt.assert_array_equal(got_color[:-1], want_color)
def test_draw_horizontal_points(self):
kws = self.default_kws.copy()
kws.update(x="y", y="g", orient="h", data=self.df)
p = cat._PointPlotter(**kws)
f, ax = plt.subplots()
p.draw_points(ax)
nt.assert_equal(len(ax.collections), 1)
nt.assert_equal(len(ax.lines), len(p.plot_data) + 1)
points = ax.collections[0]
nt.assert_equal(len(points.get_offsets()), len(p.plot_data))
x, y = points.get_offsets().T
npt.assert_array_equal(x, p.statistic)
npt.assert_array_equal(y, np.arange(len(p.plot_data)))
for got_color, want_color in zip(points.get_facecolors(),
p.colors):
npt.assert_array_equal(got_color[:-1], want_color)
def test_draw_vertical_nested_points(self):
kws = self.default_kws.copy()
kws.update(x="g", y="y", hue="h", data=self.df)
p = cat._PointPlotter(**kws)
f, ax = plt.subplots()
p.draw_points(ax)
nt.assert_equal(len(ax.collections), 2)
nt.assert_equal(len(ax.lines),
len(p.plot_data) * len(p.hue_names) + len(p.hue_names))
for points, stats, color in zip(ax.collections,
p.statistic.T,
p.colors):
nt.assert_equal(len(points.get_offsets()), len(p.plot_data))
x, y = points.get_offsets().T
npt.assert_array_equal(x, np.arange(len(p.plot_data)))
npt.assert_array_equal(y, stats)
for got_color in points.get_facecolors():
npt.assert_array_equal(got_color[:-1], color)
def test_draw_horizontal_nested_points(self):
kws = self.default_kws.copy()
kws.update(x="y", y="g", hue="h", orient="h", data=self.df)
p = cat._PointPlotter(**kws)
f, ax = plt.subplots()
p.draw_points(ax)
nt.assert_equal(len(ax.collections), 2)
nt.assert_equal(len(ax.lines),
len(p.plot_data) * len(p.hue_names) + len(p.hue_names))
for points, stats, color in zip(ax.collections,
p.statistic.T,
p.colors):
nt.assert_equal(len(points.get_offsets()), len(p.plot_data))
x, y = points.get_offsets().T
npt.assert_array_equal(x, stats)
npt.assert_array_equal(y, np.arange(len(p.plot_data)))
for got_color in points.get_facecolors():
npt.assert_array_equal(got_color[:-1], color)
def test_pointplot_colors(self):
# Test a single-color unnested plot
color = (.2, .2, .3, 1)
kws = self.default_kws.copy()
kws.update(x="g", y="y", data=self.df, color=color)
p = cat._PointPlotter(**kws)
f, ax = plt.subplots()
p.draw_points(ax)
for line in ax.lines:
nt.assert_equal(line.get_color(), color[:-1])
for got_color in ax.collections[0].get_facecolors():
npt.assert_array_equal(got_color, color)
plt.close("all")
# Test a multi-color unnested plot
palette = palettes.color_palette("Set1", 3)
kws.update(x="g", y="y", data=self.df, palette="Set1")
p = cat._PointPlotter(**kws)
nt.assert_true(not p.join)
f, ax = plt.subplots()
p.draw_points(ax)
for line, pal_color in zip(ax.lines, palette):
npt.assert_array_equal(line.get_color(), pal_color)
for point_color, pal_color in zip(ax.collections[0].get_facecolors(),
palette):
npt.assert_array_equal(point_color[:-1], pal_color)
plt.close("all")
# Test a multi-colored nested plot
palette = palettes.color_palette("dark", 2)
kws.update(x="g", y="y", hue="h", data=self.df, palette="dark")
p = cat._PointPlotter(**kws)
f, ax = plt.subplots()
p.draw_points(ax)
for line in ax.lines[:(len(p.plot_data) + 1)]:
nt.assert_equal(line.get_color(), palette[0])
for line in ax.lines[(len(p.plot_data) + 1):]:
nt.assert_equal(line.get_color(), palette[1])
for i, pal_color in enumerate(palette):
for point_color in ax.collections[i].get_facecolors():
npt.assert_array_equal(point_color[:-1], pal_color)
plt.close("all")
def test_simple_pointplots(self):
ax = cat.pointplot("g", "y", data=self.df)
nt.assert_equal(len(ax.collections), 1)
nt.assert_equal(len(ax.lines), len(self.g.unique()) + 1)
nt.assert_equal(ax.get_xlabel(), "g")
nt.assert_equal(ax.get_ylabel(), "mean(y)")
plt.close("all")
ax = cat.pointplot("y", "g", orient="h", data=self.df)
nt.assert_equal(len(ax.collections), 1)
nt.assert_equal(len(ax.lines), len(self.g.unique()) + 1)
nt.assert_equal(ax.get_xlabel(), "mean(y)")
nt.assert_equal(ax.get_ylabel(), "g")
plt.close("all")
ax = cat.pointplot("g", "y", "h", data=self.df)
nt.assert_equal(len(ax.collections), len(self.h.unique()))
nt.assert_equal(len(ax.lines),
(len(self.g.unique())
* len(self.h.unique())
+ len(self.h.unique())))
nt.assert_equal(ax.get_xlabel(), "g")
nt.assert_equal(ax.get_ylabel(), "mean(y)")
plt.close("all")
ax = cat.pointplot("y", "g", "h", orient="h", data=self.df)
nt.assert_equal(len(ax.collections), len(self.h.unique()))
nt.assert_equal(len(ax.lines),
(len(self.g.unique())
* len(self.h.unique())
+ len(self.h.unique())))
nt.assert_equal(ax.get_xlabel(), "mean(y)")
nt.assert_equal(ax.get_ylabel(), "g")
plt.close("all")
class TestCountPlot(CategoricalFixture):
def test_plot_elements(self):
ax = cat.countplot("g", data=self.df)
nt.assert_equal(len(ax.patches), self.g.unique().size)
for p in ax.patches:
nt.assert_equal(p.get_y(), 0)
nt.assert_equal(p.get_height(),
self.g.size / self.g.unique().size)
plt.close("all")
ax = cat.countplot(y="g", data=self.df)
nt.assert_equal(len(ax.patches), self.g.unique().size)
for p in ax.patches:
nt.assert_equal(p.get_x(), 0)
nt.assert_equal(p.get_width(),
self.g.size / self.g.unique().size)
plt.close("all")
ax = cat.countplot("g", hue="h", data=self.df)
nt.assert_equal(len(ax.patches),
self.g.unique().size * self.h.unique().size)
plt.close("all")
ax = cat.countplot(y="g", hue="h", data=self.df)
nt.assert_equal(len(ax.patches),
self.g.unique().size * self.h.unique().size)
plt.close("all")
def test_input_error(self):
with nt.assert_raises(TypeError):
cat.countplot()
with nt.assert_raises(TypeError):
cat.countplot(x="g", y="h", data=self.df)
class TestFactorPlot(CategoricalFixture):
def test_facet_organization(self):
g = cat.factorplot("g", "y", data=self.df)
nt.assert_equal(g.axes.shape, (1, 1))
g = cat.factorplot("g", "y", col="h", data=self.df)
nt.assert_equal(g.axes.shape, (1, 2))
g = cat.factorplot("g", "y", row="h", data=self.df)
nt.assert_equal(g.axes.shape, (2, 1))
g = cat.factorplot("g", "y", col="u", row="h", data=self.df)
nt.assert_equal(g.axes.shape, (2, 3))
def test_plot_elements(self):
g = cat.factorplot("g", "y", data=self.df)
nt.assert_equal(len(g.ax.collections), 1)
want_lines = self.g.unique().size + 1
nt.assert_equal(len(g.ax.lines), want_lines)
g = cat.factorplot("g", "y", "h", data=self.df)
want_collections = self.h.unique().size
nt.assert_equal(len(g.ax.collections), want_collections)
want_lines = (self.g.unique().size + 1) * self.h.unique().size
nt.assert_equal(len(g.ax.lines), want_lines)
g = cat.factorplot("g", "y", data=self.df, kind="bar")
want_elements = self.g.unique().size
nt.assert_equal(len(g.ax.patches), want_elements)
nt.assert_equal(len(g.ax.lines), want_elements)
g = cat.factorplot("g", "y", "h", data=self.df, kind="bar")
want_elements = self.g.unique().size * self.h.unique().size
nt.assert_equal(len(g.ax.patches), want_elements)
nt.assert_equal(len(g.ax.lines), want_elements)
g = cat.factorplot("g", data=self.df, kind="count")
want_elements = self.g.unique().size
nt.assert_equal(len(g.ax.patches), want_elements)
nt.assert_equal(len(g.ax.lines), 0)
g = cat.factorplot("g", hue="h", data=self.df, kind="count")
want_elements = self.g.unique().size * self.h.unique().size
nt.assert_equal(len(g.ax.patches), want_elements)
nt.assert_equal(len(g.ax.lines), 0)
g = cat.factorplot("g", "y", data=self.df, kind="box")
want_artists = self.g.unique().size
nt.assert_equal(len(g.ax.artists), want_artists)
g = cat.factorplot("g", "y", "h", data=self.df, kind="box")
want_artists = self.g.unique().size * self.h.unique().size
nt.assert_equal(len(g.ax.artists), want_artists)
g = cat.factorplot("g", "y", data=self.df,
kind="violin", inner=None)
want_elements = self.g.unique().size
nt.assert_equal(len(g.ax.collections), want_elements)
g = cat.factorplot("g", "y", "h", data=self.df,
kind="violin", inner=None)
want_elements = self.g.unique().size * self.h.unique().size
nt.assert_equal(len(g.ax.collections), want_elements)
g = cat.factorplot("g", "y", data=self.df, kind="strip")
want_elements = self.g.unique().size
nt.assert_equal(len(g.ax.collections), want_elements)
g = cat.factorplot("g", "y", "h", data=self.df, kind="strip")
want_elements = self.g.unique().size * self.h.unique().size
nt.assert_equal(len(g.ax.collections), want_elements)
def test_bad_plot_kind_error(self):
with nt.assert_raises(ValueError):
cat.factorplot("g", "y", data=self.df, kind="not_a_kind")
def test_count_x_and_y(self):
with nt.assert_raises(ValueError):
cat.factorplot("g", "y", data=self.df, kind="count")
def test_plot_colors(self):
ax = cat.barplot("g", "y", data=self.df)
g = cat.factorplot("g", "y", data=self.df, kind="bar")
for p1, p2 in zip(ax.patches, g.ax.patches):
nt.assert_equal(p1.get_facecolor(), p2.get_facecolor())
plt.close("all")
ax = cat.barplot("g", "y", data=self.df, color="purple")
g = cat.factorplot("g", "y", data=self.df,
kind="bar", color="purple")
for p1, p2 in zip(ax.patches, g.ax.patches):
nt.assert_equal(p1.get_facecolor(), p2.get_facecolor())
plt.close("all")
ax = cat.barplot("g", "y", data=self.df, palette="Set2")
g = cat.factorplot("g", "y", data=self.df,
kind="bar", palette="Set2")
for p1, p2 in zip(ax.patches, g.ax.patches):
nt.assert_equal(p1.get_facecolor(), p2.get_facecolor())
plt.close("all")
ax = cat.pointplot("g", "y", data=self.df)
g = cat.factorplot("g", "y", data=self.df)
for l1, l2 in zip(ax.lines, g.ax.lines):
nt.assert_equal(l1.get_color(), l2.get_color())
plt.close("all")
ax = cat.pointplot("g", "y", data=self.df, color="purple")
g = cat.factorplot("g", "y", data=self.df, color="purple")
for l1, l2 in zip(ax.lines, g.ax.lines):
nt.assert_equal(l1.get_color(), l2.get_color())
plt.close("all")
ax = cat.pointplot("g", "y", data=self.df, palette="Set2")
g = cat.factorplot("g", "y", data=self.df, palette="Set2")
for l1, l2 in zip(ax.lines, g.ax.lines):
nt.assert_equal(l1.get_color(), l2.get_color())
plt.close("all")
| bsd-3-clause | 4,935,085,947,800,979,000 | 34.038228 | 79 | 0.542371 | false |
rajegannathan/grasp-lift-eeg-cat-dog-solution-updated | python-packages/mne-python-0.10/mne/io/array/array.py | 4 | 1708 | """Tools for creating Raw objects from numpy arrays"""
# Authors: Eric Larson <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
from ..base import _BaseRaw
from ...utils import verbose, logger
class RawArray(_BaseRaw):
"""Raw object from numpy array
Parameters
----------
data : array, shape (n_channels, n_times)
The channels' time series.
info : instance of Info
Info dictionary. Consider using `create_info` to populate
this structure.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
See Also
--------
EpochsArray, EvokedArray, create_info
"""
@verbose
def __init__(self, data, info, verbose=None):
dtype = np.complex128 if np.any(np.iscomplex(data)) else np.float64
data = np.asanyarray(data, dtype=dtype)
if data.ndim != 2:
raise ValueError('Data must be a 2D array of shape (n_channels, '
                             'n_samples)')
logger.info('Creating RawArray with %s data, n_channels=%s, n_times=%s'
% (dtype.__name__, data.shape[0], data.shape[1]))
if len(data) != len(info['ch_names']):
raise ValueError('len(data) does not match len(info["ch_names"])')
assert len(info['ch_names']) == info['nchan']
super(RawArray, self).__init__(info, data, verbose=verbose)
logger.info(' Range : %d ... %d = %9.3f ... %9.3f secs' % (
self.first_samp, self.last_samp,
float(self.first_samp) / info['sfreq'],
float(self.last_samp) / info['sfreq']))
logger.info('Ready.')
| bsd-3-clause | -3,811,285,579,663,346,700 | 33.16 | 79 | 0.574356 | false |
Scopart/GitDiffHelper | git/cmd.py | 1 | 7200 | # cmd.py
# Copyright (C) 2008-2010 Michael Trier ([email protected]) and contributors
#
# This module is part of GitPython and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php
import os, sys
import subprocess
import re
from utils import *
from errors import GitCommandError
# Enables debugging of GitPython's git commands
GIT_PYTHON_TRACE = os.environ.get("GIT_PYTHON_TRACE", False)
execute_kwargs = ('istream', 'with_keep_cwd', 'with_extended_output',
'with_exceptions', 'with_raw_output')
extra = {}
if sys.platform == 'win32':
extra = {'shell': True}
class Git(object):
"""
The Git class manages communication with the Git binary.
It provides a convenient interface to calling the Git binary, such as in::
g = Git( git_dir )
g.init() # calls 'git init' program
rval = g.ls_files() # calls 'git ls-files' program
``Debugging``
    Set the GIT_PYTHON_TRACE environment variable to print each invocation
of the command to stdout.
Set its value to 'full' to see details about the returned values.
"""
def __init__(self, git_dir=None):
"""
Initialize this instance with:
``git_dir``
Git directory we should work in. If None, we always work in the current
directory as returned by os.getcwd()
"""
super(Git, self).__init__()
self.git_dir = git_dir
def __getattr__(self, name):
"""
        A convenience method that allows calling git subcommands as if they
        were methods of this object.
Returns
            Callable object that will execute _call_process with your arguments.
"""
if name[:1] == '_':
raise AttributeError(name)
return lambda *args, **kwargs: self._call_process(name, *args, **kwargs)
@property
def get_dir(self):
"""
Returns
Git directory we are working on
"""
return self.git_dir
def execute(self, command,
istream=None,
with_keep_cwd=False,
with_extended_output=False,
with_exceptions=True,
with_raw_output=False,
):
"""
Handles executing the command on the shell and consumes and returns
the returned information (stdout)
``command``
The command argument list to execute.
It should be a string, or a sequence of program arguments. The
program to execute is the first item in the args sequence or string.
``istream``
Standard input filehandle passed to subprocess.Popen.
``with_keep_cwd``
Whether to use the current working directory from os.getcwd().
GitPython uses get_work_tree() as its working directory by
default and get_git_dir() for bare repositories.
``with_extended_output``
Whether to return a (status, stdout, stderr) tuple.
``with_exceptions``
Whether to raise an exception when git returns a non-zero status.
``with_raw_output``
Whether to avoid stripping off trailing whitespace.
Returns::
str(output) # extended_output = False (Default)
tuple(int(status), str(stdout), str(stderr)) # extended_output = True
Raise
GitCommandError
NOTE
If you add additional keyword arguments to the signature of this method,
you must update the execute_kwargs tuple housed in this module.
"""
if GIT_PYTHON_TRACE and not GIT_PYTHON_TRACE == 'full':
print(' '.join(command))
# Allow the user to have the command executed in their working dir.
if with_keep_cwd or self.git_dir is None:
cwd = os.getcwd()
else:
cwd=self.git_dir
# Start the process
proc = subprocess.Popen(command,
cwd=cwd,
stdin=istream,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
**extra
)
# Wait for the process to return
try:
stdout_value = proc.stdout.read()
stderr_value = proc.stderr.read()
status = proc.wait()
finally:
proc.stdout.close()
proc.stderr.close()
# Strip off trailing whitespace by default
if not with_raw_output:
stdout_value = stdout_value.rstrip()
stderr_value = stderr_value.rstrip()
if with_exceptions and status != 0:
raise GitCommandError(command, status, stderr_value)
if GIT_PYTHON_TRACE == 'full':
if stderr_value:
print("%s -> %d: '%s' !! '%s'" % (command, status, stdout_value, stderr_value))
elif stdout_value:
print("%s -> %d: '%s'" % (command, status, stdout_value))
else:
print("%s -> %d" % (command, status))
# Allow access to the command's status code
if with_extended_output:
return (status, stdout_value, stderr_value)
else:
return stdout_value
def transform_kwargs(self, **kwargs):
"""
Transforms Python style kwargs into git command line options.
"""
args = []
for k, v in kwargs.items():
if len(k) == 1:
if v is True:
args.append("-%s" % k)
elif type(v) is not bool:
args.append("-%s%s" % (k, v))
else:
if v is True:
args.append("--%s" % dashify(k))
elif type(v) is not bool:
args.append("--%s=%s" % (dashify(k), v))
return args
def _call_process(self, method, *args, **kwargs):
"""
Run the given git command with the specified arguments and return
the result as a String
``method``
is the command. Contained "_" characters will be converted to dashes,
such as in 'ls_files' to call 'ls-files'.
``args``
is the list of arguments
``kwargs``
is a dict of keyword arguments.
This function accepts the same optional keyword arguments
as execute().
Examples::
git.rev_list('master', max_count=10, header=True)
Returns
Same as execute()
"""
# Handle optional arguments prior to calling transform_kwargs
# otherwise these'll end up in args, which is bad.
_kwargs = {}
for kwarg in execute_kwargs:
try:
_kwargs[kwarg] = kwargs.pop(kwarg)
except KeyError:
pass
# Prepare the argument list
opt_args = self.transform_kwargs(**kwargs)
ext_args = map(str, args)
args = opt_args + ext_args
call = ["git", dashify(method)]
call.extend(args)
return self.execute(call, **_kwargs)
| mit | 6,160,838,160,962,607,000 | 31.570136 | 93 | 0.54793 | false |
daisychainme/daisychain | daisychain/home/tests/tests.py | 1 | 3888 | from allauth.account.models import EmailAddress
from django.contrib.auth.models import User
from django.test import TestCase, RequestFactory
from django.test.client import Client
class TestLoginView(TestCase):
def create_user(self):
user = User.objects.create_user(username='Superuser')
user.set_password('Password')
user.is_superuser = True
user.save()
EmailAddress.objects.create(user=user,
email='[email protected]',
primary=True,
verified=True)
# self.client.login(username='Superuser', password='Password')
return user
def setUp(self):
# Every test needs access to the request factory.
self.factory = RequestFactory()
self.client = Client()
self.user = self.create_user()
def test_login_mapping_works(self):
response = self.client.get('/accounts/login/')
self.assertEquals(response.status_code, 200)
def test_login_shows_template(self):
response = self.client.get('/accounts/login/')
self.assertTemplateUsed(response=response,
template_name='account/login.html')
def test_login(self):
response = self.client.post(
'/accounts/login/',
{'login': 'Superuser', 'password': 'Password'}, follow=True)
self.assertRedirects(response, '/home/')
def test_failed_login_with_wrong_password(self):
response = self.client.post(
'/accounts/login/',
{'login': 'Superuser', 'password': 'a'},
follow=True)
self.assertContains(
response=response,
text="The login and/or password you specified are not correct.",
status_code=200)
def test_failed_login_with_wrong_username(self):
response = self.client.post(
'/accounts/login/',
{'login': 'Super', 'password': 'Password'},
follow=True)
self.assertContains(
response=response,
text="The login and/or password you specified are not correct.",
status_code=200)
# @dmkif
def test_login_if_user_logged_in_with_next(self):
response = self.client.post(
'/accounts/login/?next=/useradmin/',
{'login': 'Superuser', 'password': 'Password'}, follow=True)
self.assertEquals(response.status_code, 200)
# @dmkif
def test_login_if_user_logged_in_without_next(self):
response = self.client.post(
'/accounts/login/',
{'login': 'Superuser', 'password': 'Password'}, follow=True)
response = self.client.get('/accounts/login/')
self.assertEquals(response.status_code, 302)
def test_index_shows_template(self):
response = self.client.get('/home/')
self.assertTemplateUsed(response=response,
template_name='home/index.html')
def test_impressum_shows_template(self):
response = self.client.get('/home/impressum')
self.assertTemplateUsed(response=response,
template_name='home/impressum.html')
def test_privacypolicy_shows_template(self):
response = self.client.get('/home/privacy_policy')
self.assertTemplateUsed(response=response,
template_name='home/privacy_policy.html')
"""
class RegisterViewTest(TestCase):
def create_user(self):
user = User.objects.create_user('User', '[email protected]', 'hunter2')
user.save()
return user
def setUp(self):
# Every test needs access to the request factory.
self.factory = RequestFactory()
self.client = Client()
self.user = self.create_user()
class LogoutViewTest(TestCase):
pass
"""
| mit | 7,070,913,126,363,421,000 | 34.669725 | 78 | 0.595422 | false |
LordDamionDevil/Lony | lib/youtube_dl/extractor/hitbox.py | 14 | 7055 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
clean_html,
parse_iso8601,
float_or_none,
int_or_none,
compat_str,
determine_ext,
)
class HitboxIE(InfoExtractor):
IE_NAME = 'hitbox'
_VALID_URL = r'https?://(?:www\.)?hitbox\.tv/video/(?P<id>[0-9]+)'
_TEST = {
'url': 'http://www.hitbox.tv/video/203213',
'info_dict': {
'id': '203213',
'title': 'hitbox @ gamescom, Sub Button Hype extended, Giveaway - hitbox News Update with Oxy',
'alt_title': 'hitboxlive - Aug 9th #6',
'description': '',
'ext': 'mp4',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 215.1666,
'resolution': 'HD 720p',
'uploader': 'hitboxlive',
'view_count': int,
'timestamp': 1407576133,
'upload_date': '20140809',
'categories': ['Live Show'],
},
'params': {
# m3u8 download
'skip_download': True,
},
}
def _extract_metadata(self, url, video_id):
thumb_base = 'https://edge.sf.hitbox.tv'
metadata = self._download_json(
'%s/%s' % (url, video_id), video_id,
'Downloading metadata JSON')
date = 'media_live_since'
media_type = 'livestream'
if metadata.get('media_type') == 'video':
media_type = 'video'
date = 'media_date_added'
video_meta = metadata.get(media_type, [])[0]
title = video_meta.get('media_status')
alt_title = video_meta.get('media_title')
description = clean_html(
video_meta.get('media_description') or
video_meta.get('media_description_md'))
duration = float_or_none(video_meta.get('media_duration'))
uploader = video_meta.get('media_user_name')
views = int_or_none(video_meta.get('media_views'))
timestamp = parse_iso8601(video_meta.get(date), ' ')
categories = [video_meta.get('category_name')]
thumbs = [
{'url': thumb_base + video_meta.get('media_thumbnail'),
'width': 320,
'height': 180},
{'url': thumb_base + video_meta.get('media_thumbnail_large'),
'width': 768,
'height': 432},
]
return {
'id': video_id,
'title': title,
'alt_title': alt_title,
'description': description,
'ext': 'mp4',
'thumbnails': thumbs,
'duration': duration,
'uploader': uploader,
'view_count': views,
'timestamp': timestamp,
'categories': categories,
}
def _real_extract(self, url):
video_id = self._match_id(url)
player_config = self._download_json(
'https://www.hitbox.tv/api/player/config/video/%s' % video_id,
video_id, 'Downloading video JSON')
formats = []
for video in player_config['clip']['bitrates']:
label = video.get('label')
if label == 'Auto':
continue
video_url = video.get('url')
if not video_url:
continue
bitrate = int_or_none(video.get('bitrate'))
if determine_ext(video_url) == 'm3u8':
if not video_url.startswith('http'):
continue
formats.append({
'url': video_url,
'ext': 'mp4',
'tbr': bitrate,
'format_note': label,
'protocol': 'm3u8_native',
})
else:
formats.append({
'url': video_url,
'tbr': bitrate,
'format_note': label,
})
self._sort_formats(formats)
metadata = self._extract_metadata(
'https://www.hitbox.tv/api/media/video',
video_id)
metadata['formats'] = formats
return metadata
class HitboxLiveIE(HitboxIE):
IE_NAME = 'hitbox:live'
_VALID_URL = r'https?://(?:www\.)?hitbox\.tv/(?!video)(?P<id>.+)'
_TEST = {
'url': 'http://www.hitbox.tv/dimak',
'info_dict': {
'id': 'dimak',
'ext': 'mp4',
'description': 'md5:c9f80fa4410bc588d7faa40003fc7d0e',
'timestamp': int,
'upload_date': compat_str,
'title': compat_str,
'uploader': 'Dimak',
},
'params': {
# live
'skip_download': True,
},
}
def _real_extract(self, url):
video_id = self._match_id(url)
player_config = self._download_json(
'https://www.hitbox.tv/api/player/config/live/%s' % video_id,
video_id)
formats = []
cdns = player_config.get('cdns')
servers = []
for cdn in cdns:
# Subscribe URLs are not playable
if cdn.get('rtmpSubscribe') is True:
continue
base_url = cdn.get('netConnectionUrl')
host = re.search(r'.+\.([^\.]+\.[^\./]+)/.+', base_url).group(1)
if base_url not in servers:
servers.append(base_url)
for stream in cdn.get('bitrates'):
label = stream.get('label')
if label == 'Auto':
continue
stream_url = stream.get('url')
if not stream_url:
continue
bitrate = int_or_none(stream.get('bitrate'))
if stream.get('provider') == 'hls' or determine_ext(stream_url) == 'm3u8':
if not stream_url.startswith('http'):
continue
formats.append({
'url': stream_url,
'ext': 'mp4',
'tbr': bitrate,
'format_note': label,
'rtmp_live': True,
})
else:
formats.append({
'url': '%s/%s' % (base_url, stream_url),
'ext': 'mp4',
'tbr': bitrate,
'rtmp_live': True,
'format_note': host,
'page_url': url,
'player_url': 'http://www.hitbox.tv/static/player/flowplayer/flowplayer.commercial-3.2.16.swf',
})
self._sort_formats(formats)
metadata = self._extract_metadata(
'https://www.hitbox.tv/api/media/live',
video_id)
metadata['formats'] = formats
metadata['is_live'] = True
metadata['title'] = self._live_title(metadata.get('title'))
return metadata
| gpl-3.0 | -7,631,029,817,710,119,000 | 33.247573 | 123 | 0.45854 | false |
dstftw/youtube-dl | youtube_dl/update.py | 7 | 6908 | from __future__ import unicode_literals
import io
import json
import traceback
import hashlib
import os
import subprocess
import sys
from zipimport import zipimporter
from .utils import encode_compat_str
from .version import __version__
def rsa_verify(message, signature, key):
from hashlib import sha256
assert isinstance(message, bytes)
byte_size = (len(bin(key[0])) - 2 + 8 - 1) // 8
signature = ('%x' % pow(int(signature, 16), key[1], key[0])).encode()
signature = (byte_size * 2 - len(signature)) * b'0' + signature
asn1 = b'3031300d060960864801650304020105000420'
asn1 += sha256(message).hexdigest().encode()
if byte_size < len(asn1) // 2 + 11:
return False
expected = b'0001' + (byte_size - len(asn1) // 2 - 3) * b'ff' + b'00' + asn1
return expected == signature
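# Illustrative use of rsa_verify (a sketch with hypothetical values): given the
# raw versions.json bytes, their hex-encoded signature and an (n, e) RSA public
# key tuple such as UPDATES_RSA_KEY below, the call
#     rsa_verify(versions_json_bytes, signature_hex, UPDATES_RSA_KEY)
# returns True only if the signature matches the SHA-256 digest of the payload.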
def update_self(to_screen, verbose, opener):
"""Update the program file with the latest version from the repository"""
UPDATE_URL = 'https://yt-dl.org/update/'
VERSION_URL = UPDATE_URL + 'LATEST_VERSION'
JSON_URL = UPDATE_URL + 'versions.json'
UPDATES_RSA_KEY = (0x9d60ee4d8f805312fdb15a62f87b95bd66177b91df176765d13514a0f1754bcd2057295c5b6f1d35daa6742c3ffc9a82d3e118861c207995a8031e151d863c9927e304576bc80692bc8e094896fcf11b66f3e29e04e3a71e9a11558558acea1840aec37fc396fb6b65dc81a1c4144e03bd1c011de62e3f1357b327d08426fe93, 65537)
if not isinstance(globals().get('__loader__'), zipimporter) and not hasattr(sys, 'frozen'):
to_screen('It looks like you installed youtube-dl with a package manager, pip, setup.py or a tarball. Please use that to update.')
return
# Check if there is a new version
try:
newversion = opener.open(VERSION_URL).read().decode('utf-8').strip()
except Exception:
if verbose:
to_screen(encode_compat_str(traceback.format_exc()))
to_screen('ERROR: can\'t find the current version. Please try again later.')
return
if newversion == __version__:
to_screen('youtube-dl is up-to-date (' + __version__ + ')')
return
# Download and check versions info
try:
versions_info = opener.open(JSON_URL).read().decode('utf-8')
versions_info = json.loads(versions_info)
except Exception:
if verbose:
to_screen(encode_compat_str(traceback.format_exc()))
to_screen('ERROR: can\'t obtain versions info. Please try again later.')
return
if 'signature' not in versions_info:
to_screen('ERROR: the versions file is not signed or corrupted. Aborting.')
return
signature = versions_info['signature']
del versions_info['signature']
if not rsa_verify(json.dumps(versions_info, sort_keys=True).encode('utf-8'), signature, UPDATES_RSA_KEY):
to_screen('ERROR: the versions file signature is invalid. Aborting.')
return
version_id = versions_info['latest']
def version_tuple(version_str):
return tuple(map(int, version_str.split('.')))
if version_tuple(__version__) >= version_tuple(version_id):
to_screen('youtube-dl is up to date (%s)' % __version__)
return
to_screen('Updating to version ' + version_id + ' ...')
version = versions_info['versions'][version_id]
print_notes(to_screen, versions_info['versions'])
# sys.executable is set to the full pathname of the exe-file for py2exe
filename = sys.executable if hasattr(sys, 'frozen') else sys.argv[0]
if not os.access(filename, os.W_OK):
to_screen('ERROR: no write permissions on %s' % filename)
return
# Py2EXE
if hasattr(sys, 'frozen'):
exe = filename
directory = os.path.dirname(exe)
if not os.access(directory, os.W_OK):
to_screen('ERROR: no write permissions on %s' % directory)
return
try:
urlh = opener.open(version['exe'][0])
newcontent = urlh.read()
urlh.close()
except (IOError, OSError):
if verbose:
to_screen(encode_compat_str(traceback.format_exc()))
to_screen('ERROR: unable to download latest version')
return
newcontent_hash = hashlib.sha256(newcontent).hexdigest()
if newcontent_hash != version['exe'][1]:
to_screen('ERROR: the downloaded file hash does not match. Aborting.')
return
try:
with open(exe + '.new', 'wb') as outf:
outf.write(newcontent)
except (IOError, OSError):
if verbose:
to_screen(encode_compat_str(traceback.format_exc()))
to_screen('ERROR: unable to write the new version')
return
try:
bat = os.path.join(directory, 'youtube-dl-updater.bat')
with io.open(bat, 'w') as batfile:
batfile.write('''
@echo off
echo Waiting for file handle to be closed ...
ping 127.0.0.1 -n 5 -w 1000 > NUL
move /Y "%s.new" "%s" > NUL
echo Updated youtube-dl to version %s.
start /b "" cmd /c del "%%~f0"&exit /b"
\n''' % (exe, exe, version_id))
subprocess.Popen([bat]) # Continues to run in the background
return # Do not show premature success messages
except (IOError, OSError):
if verbose:
to_screen(encode_compat_str(traceback.format_exc()))
to_screen('ERROR: unable to overwrite current version')
return
# Zip unix package
elif isinstance(globals().get('__loader__'), zipimporter):
try:
urlh = opener.open(version['bin'][0])
newcontent = urlh.read()
urlh.close()
except (IOError, OSError):
if verbose:
to_screen(encode_compat_str(traceback.format_exc()))
to_screen('ERROR: unable to download latest version')
return
newcontent_hash = hashlib.sha256(newcontent).hexdigest()
if newcontent_hash != version['bin'][1]:
to_screen('ERROR: the downloaded file hash does not match. Aborting.')
return
try:
with open(filename, 'wb') as outf:
outf.write(newcontent)
except (IOError, OSError):
if verbose:
to_screen(encode_compat_str(traceback.format_exc()))
to_screen('ERROR: unable to overwrite current version')
return
to_screen('Updated youtube-dl. Restart youtube-dl to use the new version.')
def get_notes(versions, fromVersion):
notes = []
for v, vdata in sorted(versions.items()):
if v > fromVersion:
notes.extend(vdata.get('notes', []))
return notes
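# Illustrative call to get_notes (a sketch with hypothetical version data):
#     get_notes({'2015.01.01': {'notes': ['note A']}}, '2014.12.31') -> ['note A']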
def print_notes(to_screen, versions, fromVersion=__version__):
notes = get_notes(versions, fromVersion)
if notes:
to_screen('PLEASE NOTE:')
for note in notes:
to_screen(note)
| unlicense | -4,118,087,812,083,312,000 | 35.941176 | 289 | 0.6174 | false |
djkonro/coala | coalib/processes/BearRunning.py | 2 | 24068 | import queue
import traceback
from coalib.bears.BEAR_KIND import BEAR_KIND
from coalib.bears.GlobalBear import GlobalBear
from coalib.bears.LocalBear import LocalBear
from coalib.misc import Constants
from coalib.processes.communication.LogMessage import LOG_LEVEL, LogMessage
from coalib.processes.CONTROL_ELEMENT import CONTROL_ELEMENT
from coalib.results.Result import Result
def send_msg(message_queue, timeout, log_level, *args, delimiter=' ', end=''):
"""
Puts message into message queue for a LogPrinter to present to the user.
:param message_queue: The queue to put the message into and which the
LogPrinter reads.
:param timeout: The queue blocks at most timeout seconds for a free
slot to execute the put operation on. After the
                          timeout it raises a queue.Full exception.
    :param log_level:     The log level, i.e. ERROR, DEBUG or WARNING. It is
                          sent to the LogPrinter along with the message.
:param args: This includes the elements of the message.
:param delimiter: It is the value placed between each arg. By default
it is a ' '.
:param end: It is the value placed at the end of the message.
"""
output = str(delimiter).join(str(arg) for arg in args) + str(end)
message_queue.put(LogMessage(log_level, output),
timeout=timeout)
def validate_results(message_queue, timeout, result_list, name, args, kwargs):
"""
    Validates that the result_list passed to it contains a valid set of
    results, i.e. that result_list is a list containing only instances of the
    Result class. If any irregularity is found, a message is put into the
    message_queue to present it to the user. Each result_list belongs to one
    execution of a bear.
:param message_queue: A queue that contains messages of type
errors/warnings/debug statements to be printed in the
Log.
:param timeout: The queue blocks at most timeout seconds for a free
slot to execute the put operation on. After the
                          timeout it raises a queue.Full exception.
:param result_list: The list of results to validate.
:param name: The name of the bear executed.
:param args: The args with which the bear was executed.
:param kwargs: The kwargs with which the bear was executed.
:return: Returns None if the result_list is invalid. Else it
returns the result_list itself.
"""
if result_list is None:
return None
    for result in list(result_list):  # iterate over a copy; invalid results are removed below
if not isinstance(result, Result):
send_msg(message_queue,
timeout,
LOG_LEVEL.ERROR,
"The results from the bear {bear} could only be "
"partially processed with arguments {arglist}, "
"{kwarglist}"
.format(bear=name, arglist=args, kwarglist=kwargs))
send_msg(message_queue,
timeout,
LOG_LEVEL.DEBUG,
"One of the results in the list for the bear {bear} is "
"an instance of {ret} but it should be an instance of "
"Result"
.format(bear=name, ret=result.__class__))
result_list.remove(result)
return result_list
def run_bear(message_queue, timeout, bear_instance, *args, **kwargs):
"""
This method is responsible for executing the instance of a bear. It also
reports or logs errors if any occur during the execution of that bear
instance.
:param message_queue: A queue that contains messages of type
errors/warnings/debug statements to be printed in the
Log.
:param timeout: The queue blocks at most timeout seconds for a free
slot to execute the put operation on. After the
                          timeout it raises a queue.Full exception.
:param bear_instance: The instance of the bear to be executed.
:param args: The arguments that are to be passed to the bear.
:param kwargs: The keyword arguments that are to be passed to the
bear.
:return: Returns a valid list of objects of the type Result
                          if the bear executed successfully. None otherwise.
"""
if kwargs.get("dependency_results", True) is None:
del kwargs["dependency_results"]
name = bear_instance.name
try:
result_list = bear_instance.execute(*args, **kwargs)
except:
send_msg(message_queue,
timeout,
LOG_LEVEL.ERROR,
"The bear {bear} failed to run with the arguments "
"{arglist}, {kwarglist}. Skipping bear..."
.format(bear=name, arglist=args, kwarglist=kwargs))
send_msg(message_queue,
timeout,
LOG_LEVEL.DEBUG,
"Traceback for error in bear {bear}:"
.format(bear=name),
traceback.format_exc(),
delimiter="\n")
return None
return validate_results(message_queue,
timeout,
result_list,
name,
args,
kwargs)
def get_local_dependency_results(local_result_list, bear_instance):
"""
This method gets all the results originating from the dependencies of a
bear_instance. Each bear_instance may or may not have dependencies.
:param local_result_list: The list of results out of which the dependency
results are picked.
:param bear_instance: The instance of a local bear to get the
dependencies from.
    :return:                  Returns None if there are no dependencies for the
                              bear, otherwise a dictionary containing the
                              dependency results.
"""
deps = bear_instance.get_dependencies()
if deps == []:
return None
dependency_results = {}
dep_strings = []
for dep in deps:
dep_strings.append(dep.__name__)
for result in local_result_list:
if result.origin in dep_strings:
results = dependency_results.get(result.origin, [])
results.append(result)
dependency_results[result.origin] = results
return dependency_results
def run_local_bear(message_queue,
timeout,
local_result_list,
file_dict,
bear_instance,
filename):
"""
Runs an instance of a local bear. Checks if bear_instance is of type
    LocalBear and then passes it to run_bear to execute.
:param message_queue: A queue that contains messages of type
errors/warnings/debug statements to be printed in
the Log.
:param timeout: The queue blocks at most timeout seconds for a
free slot to execute the put operation on. After
                              the timeout it raises a queue.Full exception.
    :param local_result_list: A list that stores the results of all local
bears.
:param file_dict: Dictionary containing contents of file.
:param bear_instance: Instance of LocalBear the run.
:param filename: Name of the file to run it on.
:return: Returns a list of results generated by the passed
bear_instance.
"""
if (not isinstance(bear_instance, LocalBear) or
bear_instance.kind() != BEAR_KIND.LOCAL):
send_msg(message_queue,
timeout,
LOG_LEVEL.WARNING,
"A given local bear ({}) is not valid. Leaving "
"it out...".format(bear_instance.__class__.__name__),
Constants.THIS_IS_A_BUG)
return None
kwargs = {"dependency_results":
get_local_dependency_results(local_result_list,
bear_instance)}
return run_bear(message_queue,
timeout,
bear_instance,
filename,
file_dict[filename],
**kwargs)
def run_global_bear(message_queue,
timeout,
global_bear_instance,
dependency_results):
"""
Runs an instance of a global bear. Checks if bear_instance is of type
    GlobalBear and then passes it to run_bear to execute.
:param message_queue: A queue that contains messages of type
errors/warnings/debug statements to be printed
in the Log.
:param timeout: The queue blocks at most timeout seconds for a
free slot to execute the put operation on.
                                 After the timeout it raises a queue.Full
                                 exception.
:param global_bear_instance: Instance of GlobalBear to run.
    :param dependency_results:   The results of all the bears that the passed
                                  bear instance depends on.
:return: Returns a list of results generated by the
passed bear_instance.
"""
if (not isinstance(global_bear_instance, GlobalBear)
or global_bear_instance.kind() != BEAR_KIND.GLOBAL):
send_msg(message_queue,
timeout,
LOG_LEVEL.WARNING,
"A given global bear ({}) is not valid. Leaving it "
"out..."
.format(global_bear_instance.__class__.__name__),
Constants.THIS_IS_A_BUG)
return None
kwargs = {"dependency_results": dependency_results}
return run_bear(message_queue,
timeout,
global_bear_instance,
**kwargs)
def run_local_bears_on_file(message_queue,
timeout,
file_dict,
local_bear_list,
local_result_dict,
control_queue,
filename):
"""
This method runs a list of local bears on one file.
:param message_queue: A queue that contains messages of type
errors/warnings/debug statements to be printed
in the Log.
:param timeout: The queue blocks at most timeout seconds for a
free slot to execute the put operation on. After
                               the timeout it raises a queue.Full exception.
:param file_dict: Dictionary that contains contents of files.
:param local_bear_list: List of local bears to run on file.
:param local_result_dict: A Manager.dict that will be used to store local
bear results. A list of all local bear results
will be stored with the filename as key.
:param control_queue: If any result gets written to the result_dict a
tuple containing a CONTROL_ELEMENT (to indicate
what kind of event happened) and either a bear
name(for global results) or a file name to
indicate the result will be put to the queue.
:param filename: The name of file on which to run the bears.
"""
if filename not in file_dict:
send_msg(message_queue,
timeout,
LOG_LEVEL.ERROR,
"An internal error occurred.",
Constants.THIS_IS_A_BUG)
send_msg(message_queue,
timeout,
LOG_LEVEL.DEBUG,
"The given file through the queue is not in the file "
"dictionary.")
return
local_result_list = []
for bear_instance in local_bear_list:
result = run_local_bear(message_queue,
timeout,
local_result_list,
file_dict,
bear_instance,
filename)
if result is not None:
local_result_list.extend(result)
local_result_dict[filename] = local_result_list
control_queue.put((CONTROL_ELEMENT.LOCAL, filename))
def get_global_dependency_results(global_result_dict, bear_instance):
"""
This method gets all the results originating from the dependencies of a
bear_instance. Each bear_instance may or may not have dependencies.
    :param global_result_dict: The dict of results out of which the dependency
                               results are picked.
    :param bear_instance:      The instance of the global bear to get the
                               dependencies for.
:return: None if bear has no dependencies, False if
dependencies are not met, the dependency dict
otherwise.
"""
try:
deps = bear_instance.get_dependencies()
if deps == []:
return None
except AttributeError:
# When this occurs we have an invalid bear and a warning will be
# emitted later.
return None
dependency_results = {}
for dep in deps:
depname = dep.__name__
if depname not in global_result_dict:
return False
dependency_results[depname] = global_result_dict[depname]
return dependency_results
def get_next_global_bear(timeout,
global_bear_queue,
global_bear_list,
global_result_dict):
"""
Retrieves the next global bear.
:param timeout: The queue blocks at most timeout seconds for a
free slot to execute the put operation on. After
                               the timeout it raises a queue.Full exception.
:param global_bear_queue: queue (read, write) of indexes of global bear
instances in the global_bear_list.
:param global_bear_list: A list containing all global bears to be
executed.
:param global_result_dict: A Manager.dict that will be used to store global
results. The list of results of one global bear
will be stored with the bear name as key.
:return: (bear, bearname, dependency_results)
"""
dependency_results = False
while dependency_results is False:
bear_id = global_bear_queue.get(timeout=timeout)
bear = global_bear_list[bear_id]
dependency_results = (
get_global_dependency_results(global_result_dict, bear))
if dependency_results is False:
global_bear_queue.put(bear_id)
return bear, dependency_results
def task_done(obj):
"""
Invokes task_done if the given queue provides this operation. Otherwise
passes silently.
:param obj: Any object.
"""
if hasattr(obj, "task_done"):
obj.task_done()
def run_local_bears(filename_queue,
message_queue,
timeout,
file_dict,
local_bear_list,
local_result_dict,
control_queue):
"""
Run local bears on all the files given.
:param filename_queue: queue (read) of file names to check with
local bears.
:param message_queue: A queue that contains messages of type
errors/warnings/debug statements to be printed
in the Log.
:param timeout: The queue blocks at most timeout seconds for a
free slot to execute the put operation on. After
                              the timeout it raises a queue.Full exception.
:param file_dict: Dictionary that contains contents of files.
:param local_bear_list: List of local bears to run.
:param local_result_dict: A Manager.dict that will be used to store local
bear results. A list of all local bear results
will be stored with the filename as key.
:param control_queue: If any result gets written to the result_dict a
tuple containing a CONTROL_ELEMENT (to indicate
what kind of event happened) and either a bear
name(for global results) or a file name to
indicate the result will be put to the queue.
"""
try:
while True:
filename = filename_queue.get(timeout=timeout)
run_local_bears_on_file(message_queue,
timeout,
file_dict,
local_bear_list,
local_result_dict,
control_queue,
filename)
task_done(filename_queue)
except queue.Empty:
return
def run_global_bears(message_queue,
timeout,
global_bear_queue,
global_bear_list,
global_result_dict,
control_queue):
"""
Run all global bears.
:param message_queue: A queue that contains messages of type
errors/warnings/debug statements to be printed
in the Log.
:param timeout: The queue blocks at most timeout seconds for a
free slot to execute the put operation on. After
                              the timeout it raises a queue.Full exception.
:param global_bear_queue: queue (read, write) of indexes of global bear
instances in the global_bear_list.
:param global_bear_list: list of global bear instances
:param global_result_dict: A Manager.dict that will be used to store global
results. The list of results of one global bear
will be stored with the bear name as key.
:param control_queue: If any result gets written to the result_dict a
tuple containing a CONTROL_ELEMENT (to indicate
what kind of event happened) and either a bear
name(for global results) or a file name to
indicate the result will be put to the queue.
"""
try:
while True:
bear, dep_results = (
get_next_global_bear(timeout,
global_bear_queue,
global_bear_list,
global_result_dict))
bearname = bear.__class__.__name__
result = run_global_bear(message_queue, timeout, bear, dep_results)
if result:
global_result_dict[bearname] = result
control_queue.put((CONTROL_ELEMENT.GLOBAL, bearname))
else:
global_result_dict[bearname] = None
task_done(global_bear_queue)
except queue.Empty:
return
def run(file_name_queue,
local_bear_list,
global_bear_list,
global_bear_queue,
file_dict,
local_result_dict,
global_result_dict,
message_queue,
control_queue,
timeout=0):
"""
    This is the method that is actually run by the processes.
If parameters type is 'queue (read)' this means it has to implement the
get(timeout=TIMEOUT) method and it shall raise queue.Empty if the queue
is empty up until the end of the timeout. If the queue has the
(optional!) task_done() attribute, the run method will call it after
processing each item.
If parameters type is 'queue (write)' it shall implement the
put(object, timeout=TIMEOUT) method.
If the queues raise any exception not specified here the user will get
an 'unknown error' message. So beware of that.
:param file_name_queue: queue (read) of file names to check with local
bears. Each invocation of the run method needs
one such queue which it checks with all the
local bears. The queue could be empty.
(Repeat until queue empty.)
:param local_bear_list: List of local bear instances.
:param global_bear_list: List of global bear instances.
:param global_bear_queue: queue (read, write) of indexes of global bear
instances in the global_bear_list.
:param file_dict: dict of all files as {filename:file}, file as in
file.readlines().
:param local_result_dict: A Manager.dict that will be used to store local
results. A list of all local results.
will be stored with the filename as key.
:param global_result_dict: A Manager.dict that will be used to store global
results. The list of results of one global bear
will be stored with the bear name as key.
:param message_queue: queue (write) for debug/warning/error
messages (type LogMessage)
:param control_queue: queue (write). If any result gets written to the
result_dict a tuple containing a CONTROL_ELEMENT
(to indicate what kind of event happened) and
either a bear name (for global results) or a
file name to indicate the result will be put to
the queue. If the run method finished all its
local bears it will put
(CONTROL_ELEMENT.LOCAL_FINISHED, None) to the
queue, if it finished all global ones,
(CONTROL_ELEMENT.GLOBAL_FINISHED, None) will
be put there.
:param timeout: The queue blocks at most timeout seconds for a
free slot to execute the put operation on. After
                               the timeout it raises a queue.Full exception.
"""
try:
run_local_bears(file_name_queue,
message_queue,
timeout,
file_dict,
local_bear_list,
local_result_dict,
control_queue)
control_queue.put((CONTROL_ELEMENT.LOCAL_FINISHED, None))
run_global_bears(message_queue,
timeout,
global_bear_queue,
global_bear_list,
global_result_dict,
control_queue)
control_queue.put((CONTROL_ELEMENT.GLOBAL_FINISHED, None))
except (OSError, KeyboardInterrupt): # pragma: no cover
pass
| agpl-3.0 | 5,439,513,065,143,898,000 | 43 | 79 | 0.538641 | false |
8u1a/plaso | tests/formatters/file_system.py | 1 | 1752 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for the file system stat event formatter."""
import unittest
from plaso.formatters import file_system
from tests.formatters import test_lib
class FileStatEventFormatterTest(test_lib.EventFormatterTestCase):
"""Tests for the file system stat event formatter."""
def testInitialization(self):
"""Tests the initialization."""
event_formatter = file_system.FileStatEventFormatter()
self.assertNotEqual(event_formatter, None)
def testGetFormatStringAttributeNames(self):
"""Tests the GetFormatStringAttributeNames function."""
event_formatter = file_system.FileStatEventFormatter()
expected_attribute_names = [u'display_name', u'unallocated']
self._TestGetFormatStringAttributeNames(
event_formatter, expected_attribute_names)
# TODO: add test for GetMessages.
# TODO: add test for GetSources.
class NTFSFileStatEventFormatterTest(test_lib.EventFormatterTestCase):
"""Tests for the NFTS file system stat event formatter."""
def testInitialization(self):
"""Tests the initialization."""
event_formatter = file_system.NTFSFileStatEventFormatter()
self.assertNotEqual(event_formatter, None)
def testGetFormatStringAttributeNames(self):
"""Tests the GetFormatStringAttributeNames function."""
event_formatter = file_system.NTFSFileStatEventFormatter()
expected_attribute_names = [
u'attribute_name', u'display_name', u'file_reference', u'name',
u'parent_file_reference', u'unallocated']
self._TestGetFormatStringAttributeNames(
event_formatter, expected_attribute_names)
# TODO: add test for GetMessages.
# TODO: add test for GetSources.
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 8,404,195,075,121,901,000 | 29.736842 | 71 | 0.734589 | false |
Emergya/icm-openedx-educamadrid-platform-basic | lms/djangoapps/discussion_api/api.py | 14 | 28661 | """
Discussion API internal interface
"""
from collections import defaultdict
from urllib import urlencode
from urlparse import urlunparse
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from django.http import Http404
import itertools
from rest_framework.exceptions import PermissionDenied
from opaque_keys import InvalidKeyError
from opaque_keys.edx.locator import CourseKey
from courseware.courses import get_course_with_access
from discussion_api.forms import CommentActionsForm, ThreadActionsForm
from discussion_api.pagination import get_paginated_data
from discussion_api.permissions import (
can_delete,
get_editable_fields,
get_initializable_comment_fields,
get_initializable_thread_fields,
)
from discussion_api.serializers import CommentSerializer, ThreadSerializer, get_context
from django_comment_client.base.views import track_comment_created_event, track_thread_created_event
from django_comment_common.signals import (
thread_created,
thread_edited,
thread_deleted,
thread_voted,
comment_created,
comment_edited,
comment_voted,
comment_deleted,
)
from django_comment_client.utils import get_accessible_discussion_modules, is_commentable_cohorted
from lms.lib.comment_client.comment import Comment
from lms.lib.comment_client.thread import Thread
from lms.lib.comment_client.utils import CommentClientRequestError
from openedx.core.djangoapps.course_groups.cohorts import get_cohort_id
def _get_course_or_404(course_key, user):
"""
Get the course descriptor, raising Http404 if the course is not found,
the user cannot access forums for the course, or the discussion tab is
disabled for the course.
"""
course = get_course_with_access(user, 'load', course_key, check_if_enrolled=True)
if not any([tab.type == 'discussion' and tab.is_enabled(course, user) for tab in course.tabs]):
raise Http404
return course
def _get_thread_and_context(request, thread_id, retrieve_kwargs=None):
"""
Retrieve the given thread and build a serializer context for it, returning
both. This function also enforces access control for the thread (checking
both the user's access to the course and to the thread's cohort if
applicable). Raises Http404 if the thread does not exist or the user cannot
access it.
"""
retrieve_kwargs = retrieve_kwargs or {}
try:
if "mark_as_read" not in retrieve_kwargs:
retrieve_kwargs["mark_as_read"] = False
cc_thread = Thread(id=thread_id).retrieve(**retrieve_kwargs)
course_key = CourseKey.from_string(cc_thread["course_id"])
course = _get_course_or_404(course_key, request.user)
context = get_context(course, request, cc_thread)
if (
not context["is_requester_privileged"] and
cc_thread["group_id"] and
is_commentable_cohorted(course.id, cc_thread["commentable_id"])
):
requester_cohort = get_cohort_id(request.user, course.id)
if requester_cohort is not None and cc_thread["group_id"] != requester_cohort:
raise Http404
return cc_thread, context
except CommentClientRequestError:
# params are validated at a higher level, so the only possible request
# error is if the thread doesn't exist
raise Http404
def _get_comment_and_context(request, comment_id):
"""
Retrieve the given comment and build a serializer context for it, returning
both. This function also enforces access control for the comment (checking
both the user's access to the course and to the comment's thread's cohort if
applicable). Raises Http404 if the comment does not exist or the user cannot
access it.
"""
try:
cc_comment = Comment(id=comment_id).retrieve()
_, context = _get_thread_and_context(request, cc_comment["thread_id"])
return cc_comment, context
except CommentClientRequestError:
raise Http404
def _is_user_author_or_privileged(cc_content, context):
"""
Check if the user is the author of a content object or a privileged user.
Returns:
Boolean
"""
return (
context["is_requester_privileged"] or
context["cc_requester"]["id"] == cc_content["user_id"]
)
def get_thread_list_url(request, course_key, topic_id_list=None, following=False):
"""
Returns the URL for the thread_list_url field, given a list of topic_ids
"""
path = reverse("thread-list")
query_list = (
[("course_id", unicode(course_key))] +
[("topic_id", topic_id) for topic_id in topic_id_list or []] +
([("following", following)] if following else [])
)
return request.build_absolute_uri(urlunparse(("", "", path, "", urlencode(query_list), "")))
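# Illustrative output of get_thread_list_url (a sketch; the host, course id and
# topic ids are hypothetical): a URL such as
#     https://lms.example.com/api/discussion/v1/threads/?course_id=course-v1%3AedX%2BDemoX%2B2015&topic_id=abc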
def get_course(request, course_key):
"""
Return general discussion information for the course.
Parameters:
request: The django request object used for build_absolute_uri and
determining the requesting user.
course_key: The key of the course to get information for
Returns:
The course information; see discussion_api.views.CourseView for more
detail.
Raises:
Http404: if the course does not exist or is not accessible to the
requesting user
"""
course = _get_course_or_404(course_key, request.user)
return {
"id": unicode(course_key),
"blackouts": [
{"start": blackout["start"].isoformat(), "end": blackout["end"].isoformat()}
for blackout in course.get_discussion_blackout_datetimes()
],
"thread_list_url": get_thread_list_url(request, course_key),
"following_thread_list_url": get_thread_list_url(request, course_key, following=True),
"topics_url": request.build_absolute_uri(
reverse("course_topics", kwargs={"course_id": course_key})
)
}
def get_course_topics(request, course_key):
"""
Return the course topic listing for the given course and user.
Parameters:
course_key: The key of the course to get topics for
user: The requesting user, for access control
Returns:
A course topic listing dictionary; see discussion_api.views.CourseTopicViews
for more detail.
"""
def get_module_sort_key(module):
"""
Get the sort key for the module (falling back to the discussion_target
setting if absent)
"""
return module.sort_key or module.discussion_target
course = _get_course_or_404(course_key, request.user)
discussion_modules = get_accessible_discussion_modules(course, request.user)
modules_by_category = defaultdict(list)
for module in discussion_modules:
modules_by_category[module.discussion_category].append(module)
def get_sorted_modules(category):
"""Returns key sorted modules by category"""
return sorted(modules_by_category[category], key=get_module_sort_key)
courseware_topics = [
{
"id": None,
"name": category,
"thread_list_url": get_thread_list_url(
request,
course_key,
[item.discussion_id for item in get_sorted_modules(category)]
),
"children": [
{
"id": module.discussion_id,
"name": module.discussion_target,
"thread_list_url": get_thread_list_url(request, course_key, [module.discussion_id]),
"children": [],
}
for module in get_sorted_modules(category)
],
}
for category in sorted(modules_by_category.keys())
]
non_courseware_topics = [
{
"id": entry["id"],
"name": name,
"thread_list_url": get_thread_list_url(request, course_key, [entry["id"]]),
"children": [],
}
for name, entry in sorted(
course.discussion_topics.items(),
key=lambda item: item[1].get("sort_key", item[0])
)
]
return {
"courseware_topics": courseware_topics,
"non_courseware_topics": non_courseware_topics,
}
def get_thread_list(
request,
course_key,
page,
page_size,
topic_id_list=None,
text_search=None,
following=False,
view=None,
order_by="last_activity_at",
order_direction="desc",
):
"""
Return the list of all discussion threads pertaining to the given course
Parameters:
request: The django request objects used for build_absolute_uri
course_key: The key of the course to get discussion threads for
page: The page number (1-indexed) to retrieve
page_size: The number of threads to retrieve per page
topic_id_list: The list of topic_ids to get the discussion threads for
text_search A text search query string to match
following: If true, retrieve only threads the requester is following
view: filters for either "unread" or "unanswered" threads
order_by: The key in which to sort the threads by. The only values are
"last_activity_at", "comment_count", and "vote_count". The default is
"last_activity_at".
order_direction: The direction in which to sort the threads by. The only
values are "asc" or "desc". The default is "desc".
Note that topic_id_list, text_search, and following are mutually exclusive.
Returns:
A paginated result containing a list of threads; see
discussion_api.views.ThreadViewSet for more detail.
Raises:
ValidationError: if an invalid value is passed for a field.
ValueError: if more than one of the mutually exclusive parameters is
provided
Http404: if the requesting user does not have access to the requested course
or a page beyond the last is requested
"""
exclusive_param_count = sum(1 for param in [topic_id_list, text_search, following] if param)
if exclusive_param_count > 1: # pragma: no cover
raise ValueError("More than one mutually exclusive param passed to get_thread_list")
cc_map = {"last_activity_at": "activity", "comment_count": "comments", "vote_count": "votes"}
if order_by not in cc_map:
raise ValidationError({
"order_by":
["Invalid value. '{}' must be 'last_activity_at', 'comment_count', or 'vote_count'".format(order_by)]
})
if order_direction not in ["asc", "desc"]:
raise ValidationError({
"order_direction": ["Invalid value. '{}' must be 'asc' or 'desc'".format(order_direction)]
})
course = _get_course_or_404(course_key, request.user)
context = get_context(course, request)
query_params = {
"user_id": unicode(request.user.id),
"group_id": (
None if context["is_requester_privileged"] else
get_cohort_id(request.user, course.id)
),
"page": page,
"per_page": page_size,
"text": text_search,
"sort_key": cc_map.get(order_by),
"sort_order": order_direction,
}
text_search_rewrite = None
if view:
if view in ["unread", "unanswered"]:
query_params[view] = "true"
else:
            raise ValidationError({
"view": ["Invalid value. '{}' must be 'unread' or 'unanswered'".format(view)]
})
if following:
threads, result_page, num_pages = context["cc_requester"].subscribed_threads(query_params)
else:
query_params["course_id"] = unicode(course.id)
query_params["commentable_ids"] = ",".join(topic_id_list) if topic_id_list else None
query_params["text"] = text_search
threads, result_page, num_pages, text_search_rewrite = Thread.search(query_params)
# The comments service returns the last page of results if the requested
    # page is beyond the last page, but we want to be consistent with DRF's general
# behavior and return a 404 in that case
if result_page != page:
raise Http404
results = [ThreadSerializer(thread, context=context).data for thread in threads]
ret = get_paginated_data(request, results, page, num_pages)
ret["text_search_rewrite"] = text_search_rewrite
return ret
def get_comment_list(request, thread_id, endorsed, page, page_size):
"""
Return the list of comments in the given thread.
Arguments:
request: The django request object used for build_absolute_uri and
determining the requesting user.
thread_id: The id of the thread to get comments for.
endorsed: Boolean indicating whether to get endorsed or non-endorsed
comments (or None for all comments). Must be None for a discussion
thread and non-None for a question thread.
page: The page number (1-indexed) to retrieve
page_size: The number of comments to retrieve per page
Returns:
A paginated result containing a list of comments; see
discussion_api.views.CommentViewSet for more detail.
"""
response_skip = page_size * (page - 1)
cc_thread, context = _get_thread_and_context(
request,
thread_id,
retrieve_kwargs={
"recursive": False,
"user_id": request.user.id,
"response_skip": response_skip,
"response_limit": page_size,
}
)
# Responses to discussion threads cannot be separated by endorsed, but
# responses to question threads must be separated by endorsed due to the
# existing comments service interface
if cc_thread["thread_type"] == "question":
if endorsed is None:
raise ValidationError({"endorsed": ["This field is required for question threads."]})
elif endorsed:
# CS does not apply resp_skip and resp_limit to endorsed responses
# of a question post
responses = cc_thread["endorsed_responses"][response_skip:(response_skip + page_size)]
resp_total = len(cc_thread["endorsed_responses"])
else:
responses = cc_thread["non_endorsed_responses"]
resp_total = cc_thread["non_endorsed_resp_total"]
else:
if endorsed is not None:
raise ValidationError(
{"endorsed": ["This field may not be specified for discussion threads."]}
)
responses = cc_thread["children"]
resp_total = cc_thread["resp_total"]
# The comments service returns the last page of results if the requested
    # page is beyond the last page, but we want to be consistent with DRF's general
# behavior and return a 404 in that case
if not responses and page != 1:
raise Http404
num_pages = (resp_total + page_size - 1) / page_size if resp_total else 1
results = [CommentSerializer(response, context=context).data for response in responses]
return get_paginated_data(request, results, page, num_pages)
def _check_fields(allowed_fields, data, message):
"""
    Checks that the keys given in data are in allowed_fields
Arguments:
allowed_fields (set): A set of allowed fields
data (dict): The data to compare the allowed_fields against
message (str): The message to return if there are any invalid fields
Raises:
ValidationError if the given data contains a key that is not in
allowed_fields
"""
non_allowed_fields = {field: [message] for field in data.keys() if field not in allowed_fields}
if non_allowed_fields:
raise ValidationError(non_allowed_fields)
def _check_initializable_thread_fields(data, context): # pylint: disable=invalid-name
"""
Checks if the given data contains a thread field that is not initializable
by the requesting user
Arguments:
data (dict): The data to compare the allowed_fields against
context (dict): The context appropriate for use with the thread which
includes the requesting user
Raises:
ValidationError if the given data contains a thread field that is not
initializable by the requesting user
"""
_check_fields(
get_initializable_thread_fields(context),
data,
"This field is not initializable."
)
def _check_initializable_comment_fields(data, context): # pylint: disable=invalid-name
"""
Checks if the given data contains a comment field that is not initializable
by the requesting user
Arguments:
data (dict): The data to compare the allowed_fields against
context (dict): The context appropriate for use with the comment which
includes the requesting user
Raises:
ValidationError if the given data contains a comment field that is not
initializable by the requesting user
"""
_check_fields(
get_initializable_comment_fields(context),
data,
"This field is not initializable."
)
def _check_editable_fields(cc_content, data, context):
"""
Raise ValidationError if the given update data contains a field that is not
editable by the requesting user
"""
_check_fields(
get_editable_fields(cc_content, context),
data,
"This field is not editable."
)
def _do_extra_actions(api_content, cc_content, request_fields, actions_form, context):
"""
Perform any necessary additional actions related to content creation or
update that require a separate comments service request.
"""
for field, form_value in actions_form.cleaned_data.items():
if field in request_fields and form_value != api_content[field]:
api_content[field] = form_value
if field == "following":
if form_value:
context["cc_requester"].follow(cc_content)
else:
context["cc_requester"].unfollow(cc_content)
elif field == "abuse_flagged":
if form_value:
cc_content.flagAbuse(context["cc_requester"], cc_content)
else:
cc_content.unFlagAbuse(context["cc_requester"], cc_content, removeAll=False)
else:
assert field == "voted"
signal = thread_voted if cc_content.type == 'thread' else comment_voted
signal.send(sender=None, user=context["request"].user, post=cc_content)
if form_value:
context["cc_requester"].vote(cc_content, "up")
api_content["vote_count"] += 1
else:
context["cc_requester"].unvote(cc_content)
api_content["vote_count"] -= 1
def create_thread(request, thread_data):
"""
Create a thread.
Arguments:
request: The django request object used for build_absolute_uri and
determining the requesting user.
thread_data: The data for the created thread.
Returns:
The created thread; see discussion_api.views.ThreadViewSet for more
detail.
"""
course_id = thread_data.get("course_id")
user = request.user
if not course_id:
raise ValidationError({"course_id": ["This field is required."]})
try:
course_key = CourseKey.from_string(course_id)
course = _get_course_or_404(course_key, user)
except (Http404, InvalidKeyError):
raise ValidationError({"course_id": ["Invalid value."]})
context = get_context(course, request)
_check_initializable_thread_fields(thread_data, context)
if (
"group_id" not in thread_data and
is_commentable_cohorted(course_key, thread_data.get("topic_id"))
):
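        # Cohorted topics require a group_id; default to the requesting user's
        # cohort when the client did not supply one.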
thread_data = thread_data.copy()
thread_data["group_id"] = get_cohort_id(user, course_key)
serializer = ThreadSerializer(data=thread_data, context=context)
actions_form = ThreadActionsForm(thread_data)
if not (serializer.is_valid() and actions_form.is_valid()):
raise ValidationError(dict(serializer.errors.items() + actions_form.errors.items()))
serializer.save()
cc_thread = serializer.instance
thread_created.send(sender=None, user=user, post=cc_thread)
api_thread = serializer.data
_do_extra_actions(api_thread, cc_thread, thread_data.keys(), actions_form, context)
track_thread_created_event(request, course, cc_thread, actions_form.cleaned_data["following"])
return api_thread
def create_comment(request, comment_data):
"""
Create a comment.
Arguments:
request: The django request object used for build_absolute_uri and
determining the requesting user.
comment_data: The data for the created comment.
Returns:
The created comment; see discussion_api.views.CommentViewSet for more
detail.
"""
thread_id = comment_data.get("thread_id")
if not thread_id:
raise ValidationError({"thread_id": ["This field is required."]})
try:
cc_thread, context = _get_thread_and_context(request, thread_id)
except Http404:
raise ValidationError({"thread_id": ["Invalid value."]})
    # If a thread is closed, no new comments can be made on it
if cc_thread['closed']:
raise PermissionDenied
_check_initializable_comment_fields(comment_data, context)
serializer = CommentSerializer(data=comment_data, context=context)
actions_form = CommentActionsForm(comment_data)
if not (serializer.is_valid() and actions_form.is_valid()):
raise ValidationError(dict(serializer.errors.items() + actions_form.errors.items()))
serializer.save()
cc_comment = serializer.instance
comment_created.send(sender=None, user=request.user, post=cc_comment)
api_comment = serializer.data
_do_extra_actions(api_comment, cc_comment, comment_data.keys(), actions_form, context)
track_comment_created_event(request, context["course"], cc_comment, cc_thread["commentable_id"], followed=False)
return api_comment
def update_thread(request, thread_id, update_data):
"""
Update a thread.
Arguments:
request: The django request object used for build_absolute_uri and
determining the requesting user.
thread_id: The id for the thread to update.
update_data: The data to update in the thread.
Returns:
The updated thread; see discussion_api.views.ThreadViewSet for more
detail.
"""
cc_thread, context = _get_thread_and_context(request, thread_id)
_check_editable_fields(cc_thread, update_data, context)
serializer = ThreadSerializer(cc_thread, data=update_data, partial=True, context=context)
actions_form = ThreadActionsForm(update_data)
if not (serializer.is_valid() and actions_form.is_valid()):
raise ValidationError(dict(serializer.errors.items() + actions_form.errors.items()))
# Only save thread object if some of the edited fields are in the thread data, not extra actions
if set(update_data) - set(actions_form.fields):
serializer.save()
# signal to update Teams when a user edits a thread
thread_edited.send(sender=None, user=request.user, post=cc_thread)
api_thread = serializer.data
_do_extra_actions(api_thread, cc_thread, update_data.keys(), actions_form, context)
return api_thread
def update_comment(request, comment_id, update_data):
"""
Update a comment.
Arguments:
request: The django request object used for build_absolute_uri and
determining the requesting user.
comment_id: The id for the comment to update.
update_data: The data to update in the comment.
Returns:
The updated comment; see discussion_api.views.CommentViewSet for more
detail.
Raises:
Http404: if the comment does not exist or is not accessible to the
requesting user
PermissionDenied: if the comment is accessible to but not editable by
the requesting user
ValidationError: if there is an error applying the update (e.g. raw_body
is empty or thread_id is included)
"""
cc_comment, context = _get_comment_and_context(request, comment_id)
_check_editable_fields(cc_comment, update_data, context)
serializer = CommentSerializer(cc_comment, data=update_data, partial=True, context=context)
actions_form = CommentActionsForm(update_data)
if not (serializer.is_valid() and actions_form.is_valid()):
raise ValidationError(dict(serializer.errors.items() + actions_form.errors.items()))
# Only save comment object if some of the edited fields are in the comment data, not extra actions
if set(update_data) - set(actions_form.fields):
serializer.save()
comment_edited.send(sender=None, user=request.user, post=cc_comment)
api_comment = serializer.data
_do_extra_actions(api_comment, cc_comment, update_data.keys(), actions_form, context)
return api_comment
def get_thread(request, thread_id):
"""
Retrieve a thread.
Arguments:
request: The django request object used for build_absolute_uri and
determining the requesting user.
thread_id: The id for the thread to retrieve
"""
cc_thread, context = _get_thread_and_context(
request,
thread_id,
retrieve_kwargs={"user_id": unicode(request.user.id)}
)
serializer = ThreadSerializer(cc_thread, context=context)
return serializer.data
def get_response_comments(request, comment_id, page, page_size):
"""
Return the list of comments for the given thread response.
Arguments:
request: The django request object used for build_absolute_uri and
determining the requesting user.
comment_id: The id of the comment/response to get child comments for.
page: The page number (1-indexed) to retrieve
page_size: The number of comments to retrieve per page
Returns:
A paginated result containing a list of comments
"""
try:
cc_comment = Comment(id=comment_id).retrieve()
cc_thread, context = _get_thread_and_context(
request,
cc_comment["thread_id"],
retrieve_kwargs={
"recursive": True,
}
)
if cc_thread["thread_type"] == "question":
thread_responses = itertools.chain(cc_thread["endorsed_responses"], cc_thread["non_endorsed_responses"])
else:
thread_responses = cc_thread["children"]
response_comments = []
for response in thread_responses:
if response["id"] == comment_id:
response_comments = response["children"]
break
response_skip = page_size * (page - 1)
paged_response_comments = response_comments[response_skip:(response_skip + page_size)]
results = [CommentSerializer(comment, context=context).data for comment in paged_response_comments]
comments_count = len(response_comments)
num_pages = (comments_count + page_size - 1) / page_size if comments_count else 1
return get_paginated_data(request, results, page, num_pages)
except CommentClientRequestError:
raise Http404
def delete_thread(request, thread_id):
"""
Delete a thread.
Arguments:
request: The django request object used for build_absolute_uri and
determining the requesting user.
thread_id: The id for the thread to delete
Raises:
PermissionDenied: if user does not have permission to delete thread
"""
cc_thread, context = _get_thread_and_context(request, thread_id)
if can_delete(cc_thread, context):
cc_thread.delete()
thread_deleted.send(sender=None, user=request.user, post=cc_thread)
else:
raise PermissionDenied
def delete_comment(request, comment_id):
"""
Delete a comment.
Arguments:
request: The django request object used for build_absolute_uri and
determining the requesting user.
comment_id: The id of the comment to delete
Raises:
        PermissionDenied: if user does not have permission to delete the comment
"""
cc_comment, context = _get_comment_and_context(request, comment_id)
if can_delete(cc_comment, context):
cc_comment.delete()
comment_deleted.send(sender=None, user=request.user, post=cc_comment)
else:
raise PermissionDenied
| agpl-3.0 | 3,909,942,036,455,274,000 | 34.692403 | 117 | 0.650919 | false |
primiano/depot_tools | git_map_branches.py | 1 | 10238 | #!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Provides a short mapping of all the branches in your local repo, organized
by their upstream ('tracking branch') layout.
Example:
origin/master
cool_feature
dependent_feature
other_dependent_feature
other_feature
Branches are colorized as follows:
* Red - a remote branch (usually the root of all local branches)
* Cyan - a local branch which is the same as HEAD
* Note that multiple branches may be Cyan, if they are all on the same
commit, and you have that commit checked out.
* Green - a local branch
* Blue - a 'branch-heads' branch
* Magenta - a tag
* Magenta '{NO UPSTREAM}' - If you have local branches which do not track any
upstream, then you will see this.
"""
import argparse
import collections
import os
import subprocess2
import sys
from git_common import current_branch, upstream, tags, get_branches_info
from git_common import get_git_version, MIN_UPSTREAM_TRACK_GIT_VERSION, hash_one
from git_common import run
import setup_color
from third_party.colorama import Fore, Style
DEFAULT_SEPARATOR = ' ' * 4
class OutputManager(object):
"""Manages a number of OutputLines and formats them into aligned columns."""
def __init__(self):
self.lines = []
self.nocolor = False
self.max_column_lengths = []
self.num_columns = None
def append(self, line):
# All lines must have the same number of columns.
if not self.num_columns:
self.num_columns = len(line.columns)
self.max_column_lengths = [0] * self.num_columns
assert self.num_columns == len(line.columns)
if self.nocolor:
line.colors = [''] * self.num_columns
self.lines.append(line)
# Update maximum column lengths.
for i, col in enumerate(line.columns):
self.max_column_lengths[i] = max(self.max_column_lengths[i], len(col))
def as_formatted_string(self):
return '\n'.join(
l.as_padded_string(self.max_column_lengths) for l in self.lines)
class OutputLine(object):
"""A single line of data.
This consists of an equal number of columns, colors and separators."""
def __init__(self):
self.columns = []
self.separators = []
self.colors = []
def append(self, data, separator=DEFAULT_SEPARATOR, color=Fore.WHITE):
self.columns.append(data)
self.separators.append(separator)
self.colors.append(color)
def as_padded_string(self, max_column_lengths):
""""Returns the data as a string with each column padded to
|max_column_lengths|."""
output_string = ''
for i, (color, data, separator) in enumerate(
zip(self.colors, self.columns, self.separators)):
if max_column_lengths[i] == 0:
continue
padding = (max_column_lengths[i] - len(data)) * ' '
output_string += color + data + padding + separator
return output_string.rstrip()
class BranchMapper(object):
"""A class which constructs output representing the tree's branch structure.
Attributes:
__branches_info: a map of branches to their BranchesInfo objects which
consist of the branch hash, upstream and ahead/behind status.
__gone_branches: a set of upstreams which are not fetchable by git"""
def __init__(self):
self.verbosity = 0
self.maxjobs = 0
self.show_subject = False
self.output = OutputManager()
self.__gone_branches = set()
self.__branches_info = None
self.__parent_map = collections.defaultdict(list)
self.__current_branch = None
self.__current_hash = None
self.__tag_set = None
self.__status_info = {}
def start(self):
self.__branches_info = get_branches_info(
include_tracking_status=self.verbosity >= 1)
if (self.verbosity >= 2):
# Avoid heavy import unless necessary.
from git_cl import get_cl_statuses, color_for_status, Changelist
change_cls = [Changelist(branchref='refs/heads/'+b)
for b in self.__branches_info.keys() if b]
status_info = get_cl_statuses(change_cls,
fine_grained=self.verbosity > 2,
max_processes=self.maxjobs)
# This is a blocking get which waits for the remote CL status to be
# retrieved.
for cl, status in status_info:
self.__status_info[cl.GetBranch()] = (cl.GetIssueURL(),
color_for_status(status),
status)
roots = set()
# A map of parents to a list of their children.
for branch, branch_info in self.__branches_info.iteritems():
if not branch_info:
continue
parent = branch_info.upstream
if not self.__branches_info[parent]:
branch_upstream = upstream(branch)
# If git can't find the upstream, mark the upstream as gone.
if branch_upstream:
parent = branch_upstream
else:
self.__gone_branches.add(parent)
# A parent that isn't in the branches info is a root.
roots.add(parent)
self.__parent_map[parent].append(branch)
self.__current_branch = current_branch()
self.__current_hash = hash_one('HEAD', short=True)
self.__tag_set = tags()
if roots:
for root in sorted(roots):
self.__append_branch(root)
else:
no_branches = OutputLine()
no_branches.append('No User Branches')
self.output.append(no_branches)
def __is_invalid_parent(self, parent):
return not parent or parent in self.__gone_branches
def __color_for_branch(self, branch, branch_hash):
if branch.startswith('origin/'):
color = Fore.RED
elif branch.startswith('branch-heads'):
color = Fore.BLUE
elif self.__is_invalid_parent(branch) or branch in self.__tag_set:
color = Fore.MAGENTA
elif self.__current_hash.startswith(branch_hash):
color = Fore.CYAN
else:
color = Fore.GREEN
if branch_hash and self.__current_hash.startswith(branch_hash):
color += Style.BRIGHT
else:
color += Style.NORMAL
return color
def __append_branch(self, branch, depth=0):
"""Recurses through the tree structure and appends an OutputLine to the
OutputManager for each branch."""
branch_info = self.__branches_info[branch]
if branch_info:
branch_hash = branch_info.hash
else:
try:
branch_hash = hash_one(branch, short=True)
except subprocess2.CalledProcessError:
branch_hash = None
line = OutputLine()
# The branch name with appropriate indentation.
suffix = ''
if branch == self.__current_branch or (
self.__current_branch == 'HEAD' and branch == self.__current_hash):
suffix = ' *'
branch_string = branch
if branch in self.__gone_branches:
branch_string = '{%s:GONE}' % branch
if not branch:
branch_string = '{NO_UPSTREAM}'
main_string = ' ' * depth + branch_string + suffix
line.append(
main_string,
color=self.__color_for_branch(branch, branch_hash))
# The branch hash.
if self.verbosity >= 2:
line.append(branch_hash or '', separator=' ', color=Fore.RED)
# The branch tracking status.
if self.verbosity >= 1:
ahead_string = ''
behind_string = ''
front_separator = ''
center_separator = ''
back_separator = ''
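      # Show how far the branch has diverged from its upstream, e.g. ahead 2,
      # behind 3, or both, wrapped in brackets.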
if branch_info and not self.__is_invalid_parent(branch_info.upstream):
ahead = branch_info.ahead
behind = branch_info.behind
if ahead:
ahead_string = 'ahead %d' % ahead
if behind:
behind_string = 'behind %d' % behind
if ahead or behind:
front_separator = '['
back_separator = ']'
if ahead and behind:
center_separator = '|'
line.append(front_separator, separator=' ')
line.append(ahead_string, separator=' ', color=Fore.MAGENTA)
line.append(center_separator, separator=' ')
line.append(behind_string, separator=' ', color=Fore.MAGENTA)
line.append(back_separator)
# The Rietveld issue associated with the branch.
if self.verbosity >= 2:
(url, color, status) = ('', '', '') if self.__is_invalid_parent(branch) \
else self.__status_info[branch]
if self.verbosity > 2:
line.append('{} ({})'.format(url, status) if url else '', color=color)
else:
line.append(url or '', color=color)
# The subject of the most recent commit on the branch.
if self.show_subject:
line.append(run('log', '-n1', '--format=%s', branch, '--'))
self.output.append(line)
for child in sorted(self.__parent_map.pop(branch, ())):
self.__append_branch(child, depth=depth + 1)
def main(argv):
setup_color.init()
if get_git_version() < MIN_UPSTREAM_TRACK_GIT_VERSION:
print >> sys.stderr, (
'This tool will not show all tracking information for git version '
'earlier than ' +
'.'.join(str(x) for x in MIN_UPSTREAM_TRACK_GIT_VERSION) +
'. Please consider upgrading.')
parser = argparse.ArgumentParser(
      description='Print a tree of all branches parented by their upstreams')
parser.add_argument('-v', action='count',
help='Display branch hash and Rietveld URL')
parser.add_argument('--no-color', action='store_true', dest='nocolor',
help='Turn off colors.')
parser.add_argument(
'-j', '--maxjobs', action='store', type=int,
help='The number of jobs to use when retrieving review status')
parser.add_argument('--show-subject', action='store_true',
dest='show_subject', help='Show the commit subject.')
opts = parser.parse_args(argv)
mapper = BranchMapper()
mapper.verbosity = opts.v
mapper.output.nocolor = opts.nocolor
mapper.maxjobs = opts.maxjobs
mapper.show_subject = opts.show_subject
mapper.start()
print mapper.output.as_formatted_string()
return 0
if __name__ == '__main__':
try:
sys.exit(main(sys.argv[1:]))
except KeyboardInterrupt:
sys.stderr.write('interrupted\n')
sys.exit(1)
| bsd-3-clause | -8,290,671,882,435,309,000 | 31.194969 | 80 | 0.63528 | false |
Cryptoc1/Veer | Memory.py | 1 | 1631 | #!/usr/bin/python
import pygtk; pygtk.require('2.0')
import gtk, cairo
import Veer
import re
class Graph:
def __init__(self):
self.DrawingArea = gtk.DrawingArea()
self.DrawingArea.set_app_paintable(True)
# self.DrawingArea.connect('expose-event', self.update)
Veer.sidebar.size_plugin(self.DrawingArea)
Veer.sidebar.view.attach(self.DrawingArea, 0, 1, Veer.TOP_ATTACH, Veer.BOTTOM_ATTACH, gtk.EXPAND, gtk.FILL, 1, 1)
def update(self):
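        # Clear the previous frame, then draw a bar whose width tracks the
        # counter read from /proc/stat.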
cr = self.DrawingArea.window.cairo_create()
cr.set_operator(cairo.OPERATOR_CLEAR)
cr.rectangle(0, 0, *self.DrawingArea.window.get_size())
cr.fill()
cr.set_operator(cairo.OPERATOR_OVER)
self.update_cpu_info()
print self.cpu_proc
cr.set_source_rgba(0.8, 0.4, 0.4, 0.65)
cr.rectangle(0, 90, (int(self.cpu_proc) / 1000), 10)
        cr.fill()
        # Return True so the gtk.timeout_add callback keeps firing; a falsy
        # return value would cancel the periodic refresh after the first call.
        return True
def update_cpu_info(self):
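        # The first line of /proc/stat is the aggregate 'cpu' row; field 1 is
        # the cumulative user-mode jiffy counter, not a percentage.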
f = open('/proc/stat', 'r')
cpu_stat = f.readline().split()
self.cpu_proc = cpu_stat[1]
f.close()
def update_mem_info(self):
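        # MemTotal and MemFree are the first two lines of /proc/meminfo (values
        # in kB); strip the labels and units so only the numbers remain.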
f = open('/proc/meminfo', 'r')
mem_total = f.readline()
mem_free = f.readline()
mem_total = re.sub('[A-Z-a-z-:]', '', mem_total)
self.mem_total = float(mem_total.replace(" ", ""))
mem_free = re.sub('[A-Z-a-z-:]', '', mem_free)
self.mem_free = float(mem_free.replace(" ", ""))
self.mem_used = self.mem_total - self.mem_free
self.usage = self.mem_used / self.mem_total
f.close()
graph = Graph()
gtk.timeout_add(200, graph.update)
Veer.sidebar.update()
| mit | -8,029,232,803,097,287,000 | 29.773585 | 121 | 0.58553 | false |