repo_name | path | copies | size | content | license
---|---|---|---|---|---
partofthething/home-assistant | homeassistant/components/progettihwsw/config_flow.py | 11 | 3383 |
"""Config flow for ProgettiHWSW Automation integration."""
from ProgettiHWSW.ProgettiHWSWAPI import ProgettiHWSWAPI
import voluptuous as vol
from homeassistant import config_entries, core, exceptions
from .const import DOMAIN
DATA_SCHEMA = vol.Schema(
{vol.Required("host"): str, vol.Required("port", default=80): int}
)
async def validate_input(hass: core.HomeAssistant, data):
"""Validate the user host input."""
confs = hass.config_entries.async_entries(DOMAIN)
same_entries = [
True
for entry in confs
if entry.data.get("host") == data["host"]
and entry.data.get("port") == data["port"]
]
if same_entries:
raise ExistingEntry
api_instance = ProgettiHWSWAPI(f'{data["host"]}:{data["port"]}')
is_valid = await api_instance.check_board()
if not is_valid:
raise CannotConnect
return {
"title": is_valid["title"],
"relay_count": is_valid["relay_count"],
"input_count": is_valid["input_count"],
"is_old": is_valid["is_old"],
}
class ProgettiHWSWConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for ProgettiHWSW Automation."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL
def __init__(self):
"""Initialize class variables."""
self.s1_in = None
async def async_step_relay_modes(self, user_input=None):
"""Manage relay modes step."""
errors = {}
if user_input is not None:
whole_data = user_input
whole_data.update(self.s1_in)
return self.async_create_entry(title=whole_data["title"], data=whole_data)
relay_modes_schema = {}
for i in range(1, int(self.s1_in["relay_count"]) + 1):
relay_modes_schema[
vol.Required(f"relay_{str(i)}", default="bistable")
] = vol.In(
{
"bistable": "Bistable (ON/OFF Mode)",
"monostable": "Monostable (Timer Mode)",
}
)
return self.async_show_form(
step_id="relay_modes",
data_schema=vol.Schema(relay_modes_schema),
errors=errors,
)
async def async_step_user(self, user_input=None):
"""Handle the initial step."""
errors = {}
if user_input is not None:
try:
info = await validate_input(self.hass, user_input)
except CannotConnect:
errors["base"] = "cannot_connect"
except ExistingEntry:
return self.async_abort(reason="already_configured")
except Exception: # pylint: disable=broad-except
errors["base"] = "unknown"
else:
user_input.update(info)
self.s1_in = user_input
return await self.async_step_relay_modes()
return self.async_show_form(
step_id="user", data_schema=DATA_SCHEMA, errors=errors
)
class CannotConnect(exceptions.HomeAssistantError):
"""Error to indicate we cannot identify host."""
class WrongInfo(exceptions.HomeAssistantError):
"""Error to indicate we cannot validate relay modes input."""
class ExistingEntry(exceptions.HomeAssistantError):
"""Error to indicate we cannot validate relay modes input."""
| mit |
2014c2g2/2015cdag2_test | static/Brython3.1.1-20150328-091302/Lib/random.py | 518 | 26080 |
"""Random variable generators.
integers
--------
uniform within range
sequences
---------
pick random element
pick random sample
generate random permutation
distributions on the real line:
------------------------------
uniform
triangular
normal (Gaussian)
lognormal
negative exponential
gamma
beta
pareto
Weibull
distributions on the circle (angles 0 to 2pi)
---------------------------------------------
circular uniform
von Mises
General notes on the underlying Mersenne Twister core generator:
* The period is 2**19937-1.
* It is one of the most extensively tested generators in existence.
* The random() method is implemented in C, executes in a single Python step,
and is, therefore, threadsafe.
"""
# Module adapted for Brython : remove expensive imports
#from warnings import warn as _warn
def _warn(msg):
print(msg)
from types import MethodType as _MethodType, BuiltinMethodType as _BuiltinMethodType
from math import log as _log, exp as _exp, pi as _pi, e as _e, ceil as _ceil
from math import sqrt as _sqrt, acos as _acos, cos as _cos, sin as _sin
from browser import window
def _randint(a, b):
return int(window.Math.random()*(b-a+1)+a)
#from os import urandom as _urandom
def _urandom(n):
"""urandom(n) -> str
Return n random bytes suitable for cryptographic use."""
randbytes= [_randint(0,255) for i in range(n)]
return bytes(randbytes)
#from collections.abc import Set as _Set, Sequence as _Sequence
_Set = set
_Sequence = (str, list, tuple)  # isinstance() needs a tuple of types; tuple covers sets converted in sample()
from hashlib import sha512 as _sha512
__all__ = ["Random","seed","random","uniform","randint","choice","sample",
"randrange","shuffle","normalvariate","lognormvariate",
"expovariate","vonmisesvariate","gammavariate","triangular",
"gauss","betavariate","paretovariate","weibullvariate",
"getstate","setstate", "getrandbits",
"SystemRandom"]
NV_MAGICCONST = 4 * _exp(-0.5)/_sqrt(2.0)
TWOPI = 2.0*_pi
LOG4 = _log(4.0)
SG_MAGICCONST = 1.0 + _log(4.5)
BPF = 53 # Number of bits in a float
RECIP_BPF = 2**-BPF
# Translated by Guido van Rossum from C source provided by
# Adrian Baddeley. Adapted by Raymond Hettinger for use with
# the Mersenne Twister and os.urandom() core generators.
import _random
class Random(_random.Random):
"""Random number generator base class used by bound module functions.
Used to instantiate instances of Random to get generators that don't
share state.
Class Random can also be subclassed if you want to use a different basic
generator of your own devising: in that case, override the following
methods: random(), seed(), getstate(), and setstate().
Optionally, implement a getrandbits() method so that randrange()
can cover arbitrarily large ranges.
"""
VERSION = 3 # used by getstate/setstate
def __init__(self, x=None):
"""Initialize an instance.
Optional argument x controls seeding, as for Random.seed().
"""
self.seed(x)
self.gauss_next = None
def seed(self, a=None, version=2):
"""Initialize internal state from hashable object.
None or no argument seeds from current time or from an operating
system specific randomness source if available.
For version 2 (the default), all of the bits are used if *a* is a str,
bytes, or bytearray. For version 1, the hash() of *a* is used instead.
If *a* is an int, all bits are used.
"""
if a is None:
try:
a = int.from_bytes(_urandom(32), 'big')
except NotImplementedError:
import time
a = int(time.time() * 256) # use fractional seconds
if version == 2:
if isinstance(a, (str, bytes, bytearray)):
if isinstance(a, str):
a = a.encode()
a += _sha512(a).digest()
a = int.from_bytes(a, 'big')
super().seed(a)
self.gauss_next = None
def getstate(self):
"""Return internal state; can be passed to setstate() later."""
return self.VERSION, super().getstate(), self.gauss_next
def setstate(self, state):
"""Restore internal state from object returned by getstate()."""
version = state[0]
if version == 3:
version, internalstate, self.gauss_next = state
super().setstate(internalstate)
elif version == 2:
version, internalstate, self.gauss_next = state
# In version 2, the state was saved as signed ints, which causes
# inconsistencies between 32/64-bit systems. The state is
# really unsigned 32-bit ints, so we convert negative ints from
# version 2 to positive longs for version 3.
try:
internalstate = tuple(x % (2**32) for x in internalstate)
except ValueError as e:
raise TypeError from e
super().setstate(internalstate)
else:
raise ValueError("state with version %s passed to "
"Random.setstate() of version %s" %
(version, self.VERSION))
## ---- Methods below this point do not need to be overridden when
## ---- subclassing for the purpose of using a different core generator.
## -------------------- pickle support -------------------
def __getstate__(self): # for pickle
return self.getstate()
def __setstate__(self, state): # for pickle
self.setstate(state)
def __reduce__(self):
return self.__class__, (), self.getstate()
## -------------------- integer methods -------------------
def randrange(self, start, stop=None, step=1, _int=int):
"""Choose a random item from range(start, stop[, step]).
This fixes the problem with randint() which includes the
endpoint; in Python this is usually not what you want.
"""
# This code is a bit messy to make it fast for the
# common case while still doing adequate error checking.
istart = _int(start)
if istart != start:
raise ValueError("non-integer arg 1 for randrange()")
if stop is None:
if istart > 0:
return self._randbelow(istart)
raise ValueError("empty range for randrange()")
# stop argument supplied.
istop = _int(stop)
if istop != stop:
raise ValueError("non-integer stop for randrange()")
width = istop - istart
if step == 1 and width > 0:
return istart + self._randbelow(width)
if step == 1:
raise ValueError("empty range for randrange() (%d,%d, %d)" % (istart, istop, width))
# Non-unit step argument supplied.
istep = _int(step)
if istep != step:
raise ValueError("non-integer step for randrange()")
if istep > 0:
n = (width + istep - 1) // istep
elif istep < 0:
n = (width + istep + 1) // istep
else:
raise ValueError("zero step for randrange()")
if n <= 0:
raise ValueError("empty range for randrange()")
return istart + istep*self._randbelow(n)
def randint(self, a, b):
"""Return random integer in range [a, b], including both end points.
"""
return self.randrange(a, b+1)
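# Endpoint semantics, illustrated (added note, not in the original source):
#   randrange(6)     -> one of 0, 1, 2, 3, 4, 5   (stop value excluded)
#   randrange(1, 7)  -> one of 1 .. 6
#   randint(1, 6)    -> one of 1 .. 6             (both endpoints included)
# so randint(a, b) is simply randrange(a, b + 1), as implemented above.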
def _randbelow(self, n, int=int, maxsize=1<<BPF, type=type,
Method=_MethodType, BuiltinMethod=_BuiltinMethodType):
"Return a random int in the range [0,n). Raises ValueError if n==0."
getrandbits = self.getrandbits
# Only call self.getrandbits if the original random() builtin method
# has not been overridden or if a new getrandbits() was supplied.
if type(self.random) is BuiltinMethod or type(getrandbits) is Method:
k = n.bit_length() # don't use (n-1) here because n can be 1
r = getrandbits(k) # 0 <= r < 2**k
while r >= n:
r = getrandbits(k)
return r
# There's an overridden random() method but no new getrandbits() method,
# so we can only use random() from here.
random = self.random
if n >= maxsize:
_warn("Underlying random() generator does not supply \n"
"enough bits to choose from a population range this large.\n"
"To remove the range limitation, add a getrandbits() method.")
return int(random() * n)
rem = maxsize % n
limit = (maxsize - rem) / maxsize # int(limit * maxsize) % n == 0
r = random()
while r >= limit:
r = random()
return int(r*maxsize) % n
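# Worked example of the rejection loop above (added note): for n == 6,
# k == n.bit_length() == 3, so getrandbits(3) yields values 0..7; draws of
# 6 or 7 are rejected and redrawn, leaving 0..5 equally likely. Rejecting,
# rather than reducing r modulo n, is what keeps the result unbiased.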
## -------------------- sequence methods -------------------
def choice(self, seq):
"""Choose a random element from a non-empty sequence."""
try:
i = self._randbelow(len(seq))
except ValueError:
raise IndexError('Cannot choose from an empty sequence')
return seq[i]
def shuffle(self, x, random=None):
"""x, random=random.random -> shuffle list x in place; return None.
Optional arg random is a 0-argument function returning a random
float in [0.0, 1.0); by default, the standard random.random.
"""
if random is None:
randbelow = self._randbelow
for i in reversed(range(1, len(x))):
# pick an element in x[:i+1] with which to exchange x[i]
j = randbelow(i+1)
x[i], x[j] = x[j], x[i]
else:
_int = int
for i in reversed(range(1, len(x))):
# pick an element in x[:i+1] with which to exchange x[i]
j = _int(random() * (i+1))
x[i], x[j] = x[j], x[i]
def sample(self, population, k):
"""Chooses k unique random elements from a population sequence or set.
Returns a new list containing elements from the population while
leaving the original population unchanged. The resulting list is
in selection order so that all sub-slices will also be valid random
samples. This allows raffle winners (the sample) to be partitioned
into grand prize and second place winners (the subslices).
Members of the population need not be hashable or unique. If the
population contains repeats, then each occurrence is a possible
selection in the sample.
To choose a sample in a range of integers, use range as an argument.
This is especially fast and space efficient for sampling from a
large population: sample(range(10000000), 60)
"""
# Sampling without replacement entails tracking either potential
# selections (the pool) in a list or previous selections in a set.
# When the number of selections is small compared to the
# population, then tracking selections is efficient, requiring
# only a small set and an occasional reselection. For
# a larger number of selections, the pool tracking method is
# preferred since the list takes less space than the
# set and it doesn't suffer from frequent reselections.
if isinstance(population, _Set):
population = tuple(population)
if not isinstance(population, _Sequence):
raise TypeError("Population must be a sequence or set. For dicts, use list(d).")
randbelow = self._randbelow
n = len(population)
if not 0 <= k <= n:
raise ValueError("Sample larger than population")
result = [None] * k
setsize = 21 # size of a small set minus size of an empty list
if k > 5:
setsize += 4 ** _ceil(_log(k * 3, 4)) # table size for big sets
if n <= setsize:
# An n-length list is smaller than a k-length set
pool = list(population)
for i in range(k): # invariant: non-selected at [0,n-i)
j = randbelow(n-i)
result[i] = pool[j]
pool[j] = pool[n-i-1] # move non-selected item into vacancy
else:
selected = set()
selected_add = selected.add
for i in range(k):
j = randbelow(n)
while j in selected:
j = randbelow(n)
selected_add(j)
result[i] = population[j]
return result
## -------------------- real-valued distributions -------------------
## -------------------- uniform distribution -------------------
def uniform(self, a, b):
"Get a random number in the range [a, b) or [a, b] depending on rounding."
return a + (b-a) * self.random()
## -------------------- triangular --------------------
def triangular(self, low=0.0, high=1.0, mode=None):
"""Triangular distribution.
Continuous distribution bounded by given lower and upper limits,
and having a given mode value in-between.
http://en.wikipedia.org/wiki/Triangular_distribution
"""
u = self.random()
c = 0.5 if mode is None else (mode - low) / (high - low)
if u > c:
u = 1.0 - u
c = 1.0 - c
low, high = high, low
return low + (high - low) * (u * c) ** 0.5
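# Note (added): when u falls on the far side of the peak, u, c, low and high
# are all reflected so that the single inverse-CDF expression
# low + (high - low) * sqrt(u * c) serves both halves of the triangle.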
## -------------------- normal distribution --------------------
def normalvariate(self, mu, sigma):
"""Normal distribution.
mu is the mean, and sigma is the standard deviation.
"""
# mu = mean, sigma = standard deviation
# Uses Kinderman and Monahan method. Reference: Kinderman,
# A.J. and Monahan, J.F., "Computer generation of random
# variables using the ratio of uniform deviates", ACM Trans
# Math Software, 3, (1977), pp257-260.
random = self.random
while 1:
u1 = random()
u2 = 1.0 - random()
z = NV_MAGICCONST*(u1-0.5)/u2
zz = z*z/4.0
if zz <= -_log(u2):
break
return mu + z*sigma
## -------------------- lognormal distribution --------------------
def lognormvariate(self, mu, sigma):
"""Log normal distribution.
If you take the natural logarithm of this distribution, you'll get a
normal distribution with mean mu and standard deviation sigma.
mu can have any value, and sigma must be greater than zero.
"""
return _exp(self.normalvariate(mu, sigma))
## -------------------- exponential distribution --------------------
def expovariate(self, lambd):
"""Exponential distribution.
lambd is 1.0 divided by the desired mean. It should be
nonzero. (The parameter would be called "lambda", but that is
a reserved word in Python.) Returned values range from 0 to
positive infinity if lambd is positive, and from negative
infinity to 0 if lambd is negative.
"""
# lambd: rate lambd = 1/mean
# ('lambda' is a Python reserved word)
# we use 1-random() instead of random() to preclude the
# possibility of taking the log of zero.
return -_log(1.0 - self.random())/lambd
## -------------------- von Mises distribution --------------------
def vonmisesvariate(self, mu, kappa):
"""Circular data distribution.
mu is the mean angle, expressed in radians between 0 and 2*pi, and
kappa is the concentration parameter, which must be greater than or
equal to zero. If kappa is equal to zero, this distribution reduces
to a uniform random angle over the range 0 to 2*pi.
"""
# mu: mean angle (in radians between 0 and 2*pi)
# kappa: concentration parameter kappa (>= 0)
# if kappa = 0 generate uniform random angle
# Based upon an algorithm published in: Fisher, N.I.,
# "Statistical Analysis of Circular Data", Cambridge
# University Press, 1993.
# Thanks to Magnus Kessler for a correction to the
# implementation of step 4.
random = self.random
if kappa <= 1e-6:
return TWOPI * random()
s = 0.5 / kappa
r = s + _sqrt(1.0 + s * s)
while 1:
u1 = random()
z = _cos(_pi * u1)
d = z / (r + z)
u2 = random()
if u2 < 1.0 - d * d or u2 <= (1.0 - d) * _exp(d):
break
q = 1.0 / r
f = (q + z) / (1.0 + q * z)
u3 = random()
if u3 > 0.5:
theta = (mu + _acos(f)) % TWOPI
else:
theta = (mu - _acos(f)) % TWOPI
return theta
## -------------------- gamma distribution --------------------
def gammavariate(self, alpha, beta):
"""Gamma distribution. Not the gamma function!
Conditions on the parameters are alpha > 0 and beta > 0.
The probability distribution function is:
x ** (alpha - 1) * math.exp(-x / beta)
pdf(x) = --------------------------------------
math.gamma(alpha) * beta ** alpha
"""
# alpha > 0, beta > 0, mean is alpha*beta, variance is alpha*beta**2
# Warning: a few older sources define the gamma distribution in terms
# of alpha > -1.0
if alpha <= 0.0 or beta <= 0.0:
raise ValueError('gammavariate: alpha and beta must be > 0.0')
random = self.random
if alpha > 1.0:
# Uses R.C.H. Cheng, "The generation of Gamma
# variables with non-integral shape parameters",
# Applied Statistics, (1977), 26, No. 1, p71-74
ainv = _sqrt(2.0 * alpha - 1.0)
bbb = alpha - LOG4
ccc = alpha + ainv
while 1:
u1 = random()
if not 1e-7 < u1 < .9999999:
continue
u2 = 1.0 - random()
v = _log(u1/(1.0-u1))/ainv
x = alpha*_exp(v)
z = u1*u1*u2
r = bbb+ccc*v-x
if r + SG_MAGICCONST - 4.5*z >= 0.0 or r >= _log(z):
return x * beta
elif alpha == 1.0:
# expovariate(1)
u = random()
while u <= 1e-7:
u = random()
return -_log(u) * beta
else: # alpha is between 0 and 1 (exclusive)
# Uses ALGORITHM GS of Statistical Computing - Kennedy & Gentle
while 1:
u = random()
b = (_e + alpha)/_e
p = b*u
if p <= 1.0:
x = p ** (1.0/alpha)
else:
x = -_log((b-p)/alpha)
u1 = random()
if p > 1.0:
if u1 <= x ** (alpha - 1.0):
break
elif u1 <= _exp(-x):
break
return x * beta
## -------------------- Gauss (faster alternative) --------------------
def gauss(self, mu, sigma):
"""Gaussian distribution.
mu is the mean, and sigma is the standard deviation. This is
slightly faster than the normalvariate() function.
Not thread-safe without a lock around calls.
"""
# When x and y are two variables from [0, 1), uniformly
# distributed, then
#
# cos(2*pi*x)*sqrt(-2*log(1-y))
# sin(2*pi*x)*sqrt(-2*log(1-y))
#
# are two *independent* variables with normal distribution
# (mu = 0, sigma = 1).
# (Lambert Meertens)
# (corrected version; bug discovered by Mike Miller, fixed by LM)
# Multithreading note: When two threads call this function
# simultaneously, it is possible that they will receive the
# same return value. The window is very small though. To
# avoid this, you have to use a lock around all calls. (I
# didn't want to slow this down in the serial case by using a
# lock here.)
random = self.random
z = self.gauss_next
self.gauss_next = None
if z is None:
x2pi = random() * TWOPI
g2rad = _sqrt(-2.0 * _log(1.0 - random()))
z = _cos(x2pi) * g2rad
self.gauss_next = _sin(x2pi) * g2rad
return mu + z*sigma
## -------------------- beta --------------------
## See
## http://mail.python.org/pipermail/python-bugs-list/2001-January/003752.html
## for Ivan Frohne's insightful analysis of why the original implementation:
##
## def betavariate(self, alpha, beta):
## # Discrete Event Simulation in C, pp 87-88.
##
## y = self.expovariate(alpha)
## z = self.expovariate(1.0/beta)
## return z/(y+z)
##
## was dead wrong, and how it probably got that way.
def betavariate(self, alpha, beta):
"""Beta distribution.
Conditions on the parameters are alpha > 0 and beta > 0.
Returned values range between 0 and 1.
"""
# This version due to Janne Sinkkonen, and matches all the std
# texts (e.g., Knuth Vol 2 Ed 3 pg 134 "the beta distribution").
y = self.gammavariate(alpha, 1.)
if y == 0:
return 0.0
else:
return y / (y + self.gammavariate(beta, 1.))
## -------------------- Pareto --------------------
def paretovariate(self, alpha):
"""Pareto distribution. alpha is the shape parameter."""
# Jain, pg. 495
u = 1.0 - self.random()
return 1.0 / u ** (1.0/alpha)
## -------------------- Weibull --------------------
def weibullvariate(self, alpha, beta):
"""Weibull distribution.
alpha is the scale parameter and beta is the shape parameter.
"""
# Jain, pg. 499; bug fix courtesy Bill Arms
u = 1.0 - self.random()
return alpha * (-_log(u)) ** (1.0/beta)
## --------------- Operating System Random Source ------------------
class SystemRandom(Random):
"""Alternate random number generator using sources provided
by the operating system (such as /dev/urandom on Unix or
CryptGenRandom on Windows).
Not available on all systems (see os.urandom() for details).
"""
def random(self):
"""Get the next random number in the range [0.0, 1.0)."""
return (int.from_bytes(_urandom(7), 'big') >> 3) * RECIP_BPF
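# Note (added): _urandom(7) supplies 56 random bits; ">> 3" drops three of
# them, leaving BPF == 53 bits, and multiplying by RECIP_BPF == 2**-53 maps
# that integer onto a float uniformly distributed in [0.0, 1.0).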
def getrandbits(self, k):
"""getrandbits(k) -> x. Generates an int with k random bits."""
if k <= 0:
raise ValueError('number of bits must be greater than zero')
if k != int(k):
raise TypeError('number of bits should be an integer')
numbytes = (k + 7) // 8 # bits / 8 and rounded up
x = int.from_bytes(_urandom(numbytes), 'big')
return x >> (numbytes * 8 - k) # trim excess bits
def seed(self, *args, **kwds):
"Stub method. Not used for a system random number generator."
return None
def _notimplemented(self, *args, **kwds):
"Method should not be called for a system random number generator."
raise NotImplementedError('System entropy source does not have state.')
getstate = setstate = _notimplemented
## -------------------- test program --------------------
def _test_generator(n, func, args):
import time
print(n, 'times', func.__name__)
total = 0.0
sqsum = 0.0
smallest = 1e10
largest = -1e10
t0 = time.time()
for i in range(n):
x = func(*args)
total += x
sqsum = sqsum + x*x
smallest = min(x, smallest)
largest = max(x, largest)
t1 = time.time()
print(round(t1-t0, 3), 'sec,', end=' ')
avg = total/n
stddev = _sqrt(sqsum/n - avg*avg)
print('avg %g, stddev %g, min %g, max %g' % \
(avg, stddev, smallest, largest))
def _test(N=2000):
_test_generator(N, random, ())
_test_generator(N, normalvariate, (0.0, 1.0))
_test_generator(N, lognormvariate, (0.0, 1.0))
_test_generator(N, vonmisesvariate, (0.0, 1.0))
_test_generator(N, gammavariate, (0.01, 1.0))
_test_generator(N, gammavariate, (0.1, 1.0))
_test_generator(N, gammavariate, (0.1, 2.0))
_test_generator(N, gammavariate, (0.5, 1.0))
_test_generator(N, gammavariate, (0.9, 1.0))
_test_generator(N, gammavariate, (1.0, 1.0))
_test_generator(N, gammavariate, (2.0, 1.0))
_test_generator(N, gammavariate, (20.0, 1.0))
_test_generator(N, gammavariate, (200.0, 1.0))
_test_generator(N, gauss, (0.0, 1.0))
_test_generator(N, betavariate, (3.0, 3.0))
_test_generator(N, triangular, (0.0, 1.0, 1.0/3.0))
# Create one instance, seeded from current time, and export its methods
# as module-level functions. The functions share state across all uses
#(both in the user's code and in the Python libraries), but that's fine
# for most programs and is easier for the casual user than making them
# instantiate their own Random() instance.
_inst = Random()
seed = _inst.seed
random = _inst.random
uniform = _inst.uniform
triangular = _inst.triangular
randint = _inst.randint
choice = _inst.choice
randrange = _inst.randrange
sample = _inst.sample
shuffle = _inst.shuffle
normalvariate = _inst.normalvariate
lognormvariate = _inst.lognormvariate
expovariate = _inst.expovariate
vonmisesvariate = _inst.vonmisesvariate
gammavariate = _inst.gammavariate
gauss = _inst.gauss
betavariate = _inst.betavariate
paretovariate = _inst.paretovariate
weibullvariate = _inst.weibullvariate
getstate = _inst.getstate
setstate = _inst.setstate
getrandbits = _inst.getrandbits
if __name__ == '__main__':
_test()
| gpl-3.0 |
PYPIT/PYPIT | pypeit/scripts/lowrdx_pixflat.py | 1 | 1101 |
#!/usr/bin/env python
#
# See top-level LICENSE file for Copyright information
#
# -*- coding: utf-8 -*-
"""
This script converts a LowRedux pixel flat into a PYPIT ready one
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
def parser(options=None):
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('lowrdx_file', type = str, default = None,
help = 'LowRedux Pixel Flat FITS file')
parser.add_argument('new_file', type = str, default = None, help = 'PYPIT FITS file')
if options is None:
args = parser.parse_args()
else:
args = parser.parse_args(options)
return args
def main(args):
try:
from xastropy.xutils import xdebug as debugger
except ImportError:
import pdb as debugger
from pypeit import arlris
# Assume LRIS for now
arlris.convert_lowredux_pixflat(args.lowrdx_file, args.new_file)
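# Illustrative usage (added; file names are hypothetical):
#
#     from pypeit.scripts import lowrdx_pixflat
#     args = lowrdx_pixflat.parser(['lowredux_pixflat.fits', 'pypeit_pixflat.fits'])
#     lowrdx_pixflat.main(args)
#
# parser() accepts an explicit argument list for programmatic use and falls
# back to sys.argv when called without options.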
| gpl-3.0 |
StackStorm/mistral | mistral/engine/utils.py | 1 | 3037 |
# Copyright 2014 - Mirantis, Inc.
# Copyright 2015 - Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from mistral.db.v2 import api as db_api
from mistral import exceptions as exc
from mistral import utils
def _compare_parameters(expected_input, actual_input):
"""Compares the expected parameters with the actual parameters.
:param expected_input: Expected dict of parameters.
:param actual_input: Actual dict of parameters.
:return: Tuple {missing parameter names, unexpected parameter names}
"""
missing_params = []
unexpected_params = copy.deepcopy(list((actual_input or {}).keys()))
for p_name, p_value in expected_input.items():
if p_value is utils.NotDefined and p_name not in unexpected_params:
missing_params.append(str(p_name))
if p_name in unexpected_params:
unexpected_params.remove(p_name)
return missing_params, unexpected_params
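# Worked example (added; parameter names are hypothetical):
#   expected = {'region': utils.NotDefined, 'size': 'm1.small'}
#   actual   = {'size': 'm1.large', 'flavor': 'performance'}
#   _compare_parameters(expected, actual)  ->  (['region'], ['flavor'])
# 'region' is declared without a default and was not supplied, while 'flavor'
# was supplied but never declared.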
def validate_input(expected_input, actual_input, obj_name, obj_class):
actual_input = actual_input or {}
missing, unexpected = _compare_parameters(
expected_input,
actual_input
)
if missing or unexpected:
msg = 'Invalid input [name=%s, class=%s'
msg_props = [obj_name, obj_class]
if missing:
msg += ', missing=%s'
msg_props.append(missing)
if unexpected:
msg += ', unexpected=%s'
msg_props.append(unexpected)
msg += ']'
raise exc.InputException(msg % tuple(msg_props))
def resolve_workflow_definition(parent_wf_name, parent_wf_spec_name,
namespace, wf_spec_name):
wf_def = None
if parent_wf_name != parent_wf_spec_name:
# If parent workflow belongs to a workbook then
# check child workflow within the same workbook
# (to be able to use short names within workbooks).
# If it doesn't exist then use a name from spec
# to find a workflow in DB.
wb_name = parent_wf_name[:-(len(parent_wf_spec_name) + 1)]  # strip the trailing ".<spec name>" suffix
wf_full_name = "%s.%s" % (wb_name, wf_spec_name)
wf_def = db_api.load_workflow_definition(wf_full_name, namespace)
if not wf_def:
wf_def = db_api.load_workflow_definition(wf_spec_name, namespace)
if not wf_def:
raise exc.WorkflowException(
"Failed to find workflow [name=%s] [namespace=%s]" %
(wf_spec_name, namespace)
)
return wf_def
| apache-2.0 |
nik3daz/chrome-app-samples | push-guestbook/guestbook-srv/third_party/oauth2client/appengine.py | 26 | 22703 |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for Google App Engine
Utilities for making it easier to use OAuth 2.0 on Google App Engine.
"""
__author__ = '[email protected] (Joe Gregorio)'
import base64
import cgi
import httplib2
import logging
import os
import pickle
import time
from google.appengine.api import app_identity
from google.appengine.api import memcache
from google.appengine.api import users
from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import login_required
from google.appengine.ext.webapp.util import run_wsgi_app
from oauth2client import clientsecrets
from oauth2client import util
from oauth2client import xsrfutil
from oauth2client.anyjson import simplejson
from oauth2client.client import AccessTokenRefreshError
from oauth2client.client import AssertionCredentials
from oauth2client.client import Credentials
from oauth2client.client import Flow
from oauth2client.client import OAuth2WebServerFlow
from oauth2client.client import Storage
logger = logging.getLogger(__name__)
OAUTH2CLIENT_NAMESPACE = 'oauth2client#ns'
XSRF_MEMCACHE_ID = 'xsrf_secret_key'
def _safe_html(s):
"""Escape text to make it safe to display.
Args:
s: string, The text to escape.
Returns:
The escaped text as a string.
"""
return cgi.escape(s, quote=1).replace("'", '&#39;')
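# Example of the escaping above (added; the input string is made up):
#   _safe_html('<a href="x">O\'Hara</a>')
#   -> '&lt;a href=&quot;x&quot;&gt;O&#39;Hara&lt;/a&gt;'
# cgi.escape() covers &, <, > and (with quote=1) double quotes; the extra
# replace() handles single quotes, which cgi.escape() leaves untouched.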
class InvalidClientSecretsError(Exception):
"""The client_secrets.json file is malformed or missing required fields."""
class InvalidXsrfTokenError(Exception):
"""The XSRF token is invalid or expired."""
class SiteXsrfSecretKey(db.Model):
"""Storage for the sites XSRF secret key.
There will only be one instance stored of this model, the one used for the
site. """
secret = db.StringProperty()
def _generate_new_xsrf_secret_key():
"""Returns a random XSRF secret key.
"""
return os.urandom(16).encode("hex")
def xsrf_secret_key():
"""Return the secret key for use for XSRF protection.
If the Site entity does not have a secret key, this method will also create
one and persist it.
Returns:
The secret key.
"""
secret = memcache.get(XSRF_MEMCACHE_ID, namespace=OAUTH2CLIENT_NAMESPACE)
if not secret:
# Load the one and only instance of SiteXsrfSecretKey.
model = SiteXsrfSecretKey.get_or_insert(key_name='site')
if not model.secret:
model.secret = _generate_new_xsrf_secret_key()
model.put()
secret = model.secret
memcache.add(XSRF_MEMCACHE_ID, secret, namespace=OAUTH2CLIENT_NAMESPACE)
return str(secret)
class AppAssertionCredentials(AssertionCredentials):
"""Credentials object for App Engine Assertion Grants
This object will allow an App Engine application to identify itself to Google
and other OAuth 2.0 servers that can verify assertions. It can be used for the
purpose of accessing data stored under an account assigned to the App Engine
application itself.
This credential does not require a flow to instantiate because it represents
a two legged flow, and therefore has all of the required information to
generate and refresh its own access tokens.
"""
@util.positional(2)
def __init__(self, scope, **kwargs):
"""Constructor for AppAssertionCredentials
Args:
scope: string or list of strings, scope(s) of the credentials being
requested.
"""
if type(scope) is list:
scope = ' '.join(scope)
self.scope = scope
super(AppAssertionCredentials, self).__init__(
'ignored' # assertion_type is ignored in this subclass.
)
@classmethod
def from_json(cls, json):
data = simplejson.loads(json)
return AppAssertionCredentials(data['scope'])
def _refresh(self, http_request):
"""Refreshes the access_token.
Since the underlying App Engine app_identity implementation does its own
caching, we can skip all the storage hoops and just do a refresh using the
API.
Args:
http_request: callable, a callable that matches the method signature of
httplib2.Http.request, used to make the refresh request.
Raises:
AccessTokenRefreshError: When the refresh fails.
"""
try:
(token, _) = app_identity.get_access_token(self.scope)
except app_identity.Error, e:
raise AccessTokenRefreshError(str(e))
self.access_token = token
class FlowProperty(db.Property):
"""App Engine datastore Property for Flow.
Utility property that allows easy storage and retrieval of an
oauth2client.Flow"""
# Tell what the user type is.
data_type = Flow
# For writing to datastore.
def get_value_for_datastore(self, model_instance):
flow = super(FlowProperty,
self).get_value_for_datastore(model_instance)
return db.Blob(pickle.dumps(flow))
# For reading from datastore.
def make_value_from_datastore(self, value):
if value is None:
return None
return pickle.loads(value)
def validate(self, value):
if value is not None and not isinstance(value, Flow):
raise db.BadValueError('Property %s must be convertible '
'to a FlowThreeLegged instance (%s)' %
(self.name, value))
return super(FlowProperty, self).validate(value)
def empty(self, value):
return not value
class CredentialsProperty(db.Property):
"""App Engine datastore Property for Credentials.
Utility property that allows easy storage and retrieval of
oauth2client.Credentials
"""
# Tell what the user type is.
data_type = Credentials
# For writing to datastore.
def get_value_for_datastore(self, model_instance):
logger.info("get: Got type " + str(type(model_instance)))
cred = super(CredentialsProperty,
self).get_value_for_datastore(model_instance)
if cred is None:
cred = ''
else:
cred = cred.to_json()
return db.Blob(cred)
# For reading from datastore.
def make_value_from_datastore(self, value):
logger.info("make: Got type " + str(type(value)))
if value is None:
return None
if len(value) == 0:
return None
try:
credentials = Credentials.new_from_json(value)
except ValueError:
credentials = None
return credentials
def validate(self, value):
value = super(CredentialsProperty, self).validate(value)
logger.info("validate: Got type " + str(type(value)))
if value is not None and not isinstance(value, Credentials):
raise db.BadValueError('Property %s must be convertible '
'to a Credentials instance (%s)' %
(self.name, value))
#if value is not None and not isinstance(value, Credentials):
# return None
return value
class StorageByKeyName(Storage):
"""Store and retrieve a single credential to and from
the App Engine datastore.
This Storage helper presumes the Credentials
have been stored as a CredentialsProperty
on a datastore model class, and that entities
are stored by key_name.
"""
@util.positional(4)
def __init__(self, model, key_name, property_name, cache=None):
"""Constructor for Storage.
Args:
model: db.Model, model class
key_name: string, key name for the entity that has the credentials
property_name: string, name of the property that is a CredentialsProperty
cache: memcache, a write-through cache to put in front of the datastore
"""
self._model = model
self._key_name = key_name
self._property_name = property_name
self._cache = cache
def locked_get(self):
"""Retrieve Credential from datastore.
Returns:
oauth2client.Credentials
"""
if self._cache:
json = self._cache.get(self._key_name)
if json:
return Credentials.new_from_json(json)
credential = None
entity = self._model.get_by_key_name(self._key_name)
if entity is not None:
credential = getattr(entity, self._property_name)
if credential and hasattr(credential, 'set_store'):
credential.set_store(self)
if self._cache:
self._cache.set(self._key_name, credential.to_json())
return credential
def locked_put(self, credentials):
"""Write a Credentials to the datastore.
Args:
credentials: Credentials, the credentials to store.
"""
entity = self._model.get_or_insert(self._key_name)
setattr(entity, self._property_name, credentials)
entity.put()
if self._cache:
self._cache.set(self._key_name, credentials.to_json())
def locked_delete(self):
"""Delete Credential from datastore."""
if self._cache:
self._cache.delete(self._key_name)
entity = self._model.get_by_key_name(self._key_name)
if entity is not None:
entity.delete()
class CredentialsModel(db.Model):
"""Storage for OAuth 2.0 Credentials
Storage of the model is keyed by the user.user_id().
"""
credentials = CredentialsProperty()
def _build_state_value(request_handler, user):
"""Composes the value for the 'state' parameter.
Packs the current request URI and an XSRF token into an opaque string that
can be passed to the authentication server via the 'state' parameter.
Args:
request_handler: webapp.RequestHandler, The request.
user: google.appengine.api.users.User, The current user.
Returns:
The state value as a string.
"""
uri = request_handler.request.url
token = xsrfutil.generate_token(xsrf_secret_key(), user.user_id(),
action_id=str(uri))
return uri + ':' + token
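# Shape of the state value (added note; the URI is hypothetical):
#   _build_state_value(handler, user) -> 'https://example.com/settings:<xsrf-token>'
# _parse_state_value() below splits on the last ':' (rsplit(':', 1)), so any
# colons inside the request URI survive, and the token is re-validated against
# the same user and URI before the redirect target is trusted.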
def _parse_state_value(state, user):
"""Parse the value of the 'state' parameter.
Parses the value and validates the XSRF token in the state parameter.
Args:
state: string, The value of the state parameter.
user: google.appengine.api.users.User, The current user.
Raises:
InvalidXsrfTokenError: if the XSRF token is invalid.
Returns:
The redirect URI.
"""
uri, token = state.rsplit(':', 1)
if not xsrfutil.validate_token(xsrf_secret_key(), token, user.user_id(),
action_id=uri):
raise InvalidXsrfTokenError()
return uri
class OAuth2Decorator(object):
"""Utility for making OAuth 2.0 easier.
Instantiate and then use with oauth_required or oauth_aware
as decorators on webapp.RequestHandler methods.
Example:
decorator = OAuth2Decorator(
client_id='837...ent.com',
client_secret='Qh...wwI',
scope='https://www.googleapis.com/auth/plus')
class MainHandler(webapp.RequestHandler):
@decorator.oauth_required
def get(self):
http = decorator.http()
# http is authorized with the user's Credentials and can be used
# in API calls
"""
@util.positional(4)
def __init__(self, client_id, client_secret, scope,
auth_uri='https://accounts.google.com/o/oauth2/auth',
token_uri='https://accounts.google.com/o/oauth2/token',
user_agent=None,
message=None,
callback_path='/oauth2callback',
**kwargs):
"""Constructor for OAuth2Decorator
Args:
client_id: string, client identifier.
client_secret: string client secret.
scope: string or list of strings, scope(s) of the credentials being
requested.
auth_uri: string, URI for authorization endpoint. For convenience
defaults to Google's endpoints but any OAuth 2.0 provider can be used.
token_uri: string, URI for token endpoint. For convenience
defaults to Google's endpoints but any OAuth 2.0 provider can be used.
user_agent: string, User agent of your application, default to None.
message: Message to display if there are problems with the OAuth 2.0
configuration. The message may contain HTML and will be presented on the
web interface for any method that uses the decorator.
callback_path: string, The absolute path to use as the callback URI. Note
that this must match up with the URI given when registering the
application in the APIs Console.
**kwargs: dict, Keyword arguments are passed along as kwargs to the
OAuth2WebServerFlow constructor.
"""
self.flow = None
self.credentials = None
self._client_id = client_id
self._client_secret = client_secret
self._scope = scope
self._auth_uri = auth_uri
self._token_uri = token_uri
self._user_agent = user_agent
self._kwargs = kwargs
self._message = message
self._in_error = False
self._callback_path = callback_path
def _display_error_message(self, request_handler):
request_handler.response.out.write('<html><body>')
request_handler.response.out.write(_safe_html(self._message))
request_handler.response.out.write('</body></html>')
def oauth_required(self, method):
"""Decorator that starts the OAuth 2.0 dance.
Starts the OAuth dance for the logged in user if they haven't already
granted access for this application.
Args:
method: callable, to be decorated method of a webapp.RequestHandler
instance.
"""
def check_oauth(request_handler, *args, **kwargs):
if self._in_error:
self._display_error_message(request_handler)
return
user = users.get_current_user()
# Don't use @login_decorator as this could be used in a POST request.
if not user:
request_handler.redirect(users.create_login_url(
request_handler.request.uri))
return
self._create_flow(request_handler)
# Store the request URI in 'state' so we can use it later
self.flow.params['state'] = _build_state_value(request_handler, user)
self.credentials = StorageByKeyName(
CredentialsModel, user.user_id(), 'credentials').get()
if not self.has_credentials():
return request_handler.redirect(self.authorize_url())
try:
return method(request_handler, *args, **kwargs)
except AccessTokenRefreshError:
return request_handler.redirect(self.authorize_url())
return check_oauth
def _create_flow(self, request_handler):
"""Create the Flow object.
The Flow is calculated lazily since we don't know where this app is
running until it receives a request, at which point redirect_uri can be
calculated and then the Flow object can be constructed.
Args:
request_handler: webapp.RequestHandler, the request handler.
"""
if self.flow is None:
redirect_uri = request_handler.request.relative_url(
self._callback_path) # Usually /oauth2callback
self.flow = OAuth2WebServerFlow(self._client_id, self._client_secret,
self._scope, redirect_uri=redirect_uri,
user_agent=self._user_agent,
auth_uri=self._auth_uri,
token_uri=self._token_uri, **self._kwargs)
def oauth_aware(self, method):
"""Decorator that sets up for OAuth 2.0 dance, but doesn't do it.
Does all the setup for the OAuth dance, but doesn't initiate it.
This decorator is useful if you want to create a page that knows
whether or not the user has granted access to this application.
From within a method decorated with @oauth_aware the has_credentials()
and authorize_url() methods can be called.
Args:
method: callable, to be decorated method of a webapp.RequestHandler
instance.
"""
def setup_oauth(request_handler, *args, **kwargs):
if self._in_error:
self._display_error_message(request_handler)
return
user = users.get_current_user()
# Don't use @login_decorator as this could be used in a POST request.
if not user:
request_handler.redirect(users.create_login_url(
request_handler.request.uri))
return
self._create_flow(request_handler)
self.flow.params['state'] = _build_state_value(request_handler, user)
self.credentials = StorageByKeyName(
CredentialsModel, user.user_id(), 'credentials').get()
return method(request_handler, *args, **kwargs)
return setup_oauth
def has_credentials(self):
"""True if for the logged in user there are valid access Credentials.
Must only be called from within a webapp.RequestHandler subclassed method
that had been decorated with either @oauth_required or @oauth_aware.
"""
return self.credentials is not None and not self.credentials.invalid
def authorize_url(self):
"""Returns the URL to start the OAuth dance.
Must only be called from within a webapp.RequestHandler subclassed method
that had been decorated with either @oauth_required or @oauth_aware.
"""
url = self.flow.step1_get_authorize_url()
return str(url)
def http(self):
"""Returns an authorized http instance.
Must only be called from within an @oauth_required decorated method, or
from within an @oauth_aware decorated method where has_credentials()
returns True.
"""
return self.credentials.authorize(httplib2.Http())
@property
def callback_path(self):
"""The absolute path where the callback will occur.
Note this is the absolute path, not the absolute URI, that will be
calculated by the decorator at runtime. See callback_handler() for how this
should be used.
Returns:
The callback path as a string.
"""
return self._callback_path
def callback_handler(self):
"""RequestHandler for the OAuth 2.0 redirect callback.
Usage:
app = webapp.WSGIApplication([
('/index', MyIndexHandler),
...,
(decorator.callback_path, decorator.callback_handler())
])
Returns:
A webapp.RequestHandler that handles the redirect back from the
server during the OAuth 2.0 dance.
"""
decorator = self
class OAuth2Handler(webapp.RequestHandler):
"""Handler for the redirect_uri of the OAuth 2.0 dance."""
@login_required
def get(self):
error = self.request.get('error')
if error:
errormsg = self.request.get('error_description', error)
self.response.out.write(
'The authorization request failed: %s' % _safe_html(errormsg))
else:
user = users.get_current_user()
decorator._create_flow(self)
credentials = decorator.flow.step2_exchange(self.request.params)
StorageByKeyName(
CredentialsModel, user.user_id(), 'credentials').put(credentials)
redirect_uri = _parse_state_value(str(self.request.get('state')),
user)
self.redirect(redirect_uri)
return OAuth2Handler
def callback_application(self):
"""WSGI application for handling the OAuth 2.0 redirect callback.
If you need finer grained control use `callback_handler` which returns just
the webapp.RequestHandler.
Returns:
A webapp.WSGIApplication that handles the redirect back from the
server during the OAuth 2.0 dance.
"""
return webapp.WSGIApplication([
(self.callback_path, self.callback_handler())
])
class OAuth2DecoratorFromClientSecrets(OAuth2Decorator):
"""An OAuth2Decorator that builds from a clientsecrets file.
Uses a clientsecrets file as the source for all the information when
constructing an OAuth2Decorator.
Example:
decorator = OAuth2DecoratorFromClientSecrets(
os.path.join(os.path.dirname(__file__), 'client_secrets.json')
scope='https://www.googleapis.com/auth/plus')
class MainHandler(webapp.RequestHandler):
@decorator.oauth_required
def get(self):
http = decorator.http()
# http is authorized with the user's Credentials and can be used
# in API calls
"""
@util.positional(3)
def __init__(self, filename, scope, message=None, cache=None):
"""Constructor
Args:
filename: string, File name of client secrets.
scope: string or list of strings, scope(s) of the credentials being
requested.
message: string, A friendly string to display to the user if the
clientsecrets file is missing or invalid. The message may contain HTML
and will be presented on the web interface for any method that uses the
decorator.
cache: An optional cache service client that implements get() and set()
methods. See clientsecrets.loadfile() for details.
"""
client_type, client_info = clientsecrets.loadfile(filename, cache=cache)
if client_type not in [
clientsecrets.TYPE_WEB, clientsecrets.TYPE_INSTALLED]:
raise InvalidClientSecretsError(
'OAuth2Decorator doesn\'t support this OAuth 2.0 flow.')
super(OAuth2DecoratorFromClientSecrets, self).__init__(
client_info['client_id'],
client_info['client_secret'],
scope,
auth_uri=client_info['auth_uri'],
token_uri=client_info['token_uri'],
message=message)
if message is not None:
self._message = message
else:
self._message = "Please configure your application for OAuth 2.0"
@util.positional(2)
def oauth2decorator_from_clientsecrets(filename, scope,
message=None, cache=None):
"""Creates an OAuth2Decorator populated from a clientsecrets file.
Args:
filename: string, File name of client secrets.
scope: string or list of strings, scope(s) of the credentials being
requested.
message: string, A friendly string to display to the user if the
clientsecrets file is missing or invalid. The message may contain HTML and
will be presented on the web interface for any method that uses the
decorator.
cache: An optional cache service client that implements get() and set()
methods. See clientsecrets.loadfile() for details.
Returns: An OAuth2Decorator
"""
return OAuth2DecoratorFromClientSecrets(filename, scope,
message=message, cache=cache)
| apache-2.0 |
tv42/camlistore | lib/python/fusepy/fuse.py | 23 | 22799 |
# Copyright (c) 2008 Giorgos Verigakis <[email protected]>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from __future__ import division
from ctypes import *
from ctypes.util import find_library
from errno import *
from functools import partial
from os import strerror
from platform import machine, system
from stat import S_IFDIR
from traceback import print_exc
class c_timespec(Structure):
_fields_ = [('tv_sec', c_long), ('tv_nsec', c_long)]
class c_utimbuf(Structure):
_fields_ = [('actime', c_timespec), ('modtime', c_timespec)]
class c_stat(Structure):
pass # Platform dependent
_system = system()
if _system in ('Darwin', 'FreeBSD'):
_libiconv = CDLL(find_library("iconv"), RTLD_GLOBAL) # libfuse dependency
ENOTSUP = 45
c_dev_t = c_int32
c_fsblkcnt_t = c_ulong
c_fsfilcnt_t = c_ulong
c_gid_t = c_uint32
c_mode_t = c_uint16
c_off_t = c_int64
c_pid_t = c_int32
c_uid_t = c_uint32
setxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte),
c_size_t, c_int, c_uint32)
getxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte),
c_size_t, c_uint32)
c_stat._fields_ = [
('st_dev', c_dev_t),
('st_ino', c_uint32),
('st_mode', c_mode_t),
('st_nlink', c_uint16),
('st_uid', c_uid_t),
('st_gid', c_gid_t),
('st_rdev', c_dev_t),
('st_atimespec', c_timespec),
('st_mtimespec', c_timespec),
('st_ctimespec', c_timespec),
('st_size', c_off_t),
('st_blocks', c_int64),
('st_blksize', c_int32)]
elif _system == 'Linux':
ENOTSUP = 95
c_dev_t = c_ulonglong
c_fsblkcnt_t = c_ulonglong
c_fsfilcnt_t = c_ulonglong
c_gid_t = c_uint
c_mode_t = c_uint
c_off_t = c_longlong
c_pid_t = c_int
c_uid_t = c_uint
setxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte), c_size_t, c_int)
getxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte), c_size_t)
_machine = machine()
if _machine == 'x86_64':
c_stat._fields_ = [
('st_dev', c_dev_t),
('st_ino', c_ulong),
('st_nlink', c_ulong),
('st_mode', c_mode_t),
('st_uid', c_uid_t),
('st_gid', c_gid_t),
('__pad0', c_int),
('st_rdev', c_dev_t),
('st_size', c_off_t),
('st_blksize', c_long),
('st_blocks', c_long),
('st_atimespec', c_timespec),
('st_mtimespec', c_timespec),
('st_ctimespec', c_timespec)]
elif _machine == 'ppc':
c_stat._fields_ = [
('st_dev', c_dev_t),
('st_ino', c_ulonglong),
('st_mode', c_mode_t),
('st_nlink', c_uint),
('st_uid', c_uid_t),
('st_gid', c_gid_t),
('st_rdev', c_dev_t),
('__pad2', c_ushort),
('st_size', c_off_t),
('st_blksize', c_long),
('st_blocks', c_longlong),
('st_atimespec', c_timespec),
('st_mtimespec', c_timespec),
('st_ctimespec', c_timespec)]
else:
# i686, use as fallback for everything else
c_stat._fields_ = [
('st_dev', c_dev_t),
('__pad1', c_ushort),
('__st_ino', c_ulong),
('st_mode', c_mode_t),
('st_nlink', c_uint),
('st_uid', c_uid_t),
('st_gid', c_gid_t),
('st_rdev', c_dev_t),
('__pad2', c_ushort),
('st_size', c_off_t),
('st_blksize', c_long),
('st_blocks', c_longlong),
('st_atimespec', c_timespec),
('st_mtimespec', c_timespec),
('st_ctimespec', c_timespec),
('st_ino', c_ulonglong)]
else:
raise NotImplementedError('%s is not supported.' % _system)
class c_statvfs(Structure):
_fields_ = [
('f_bsize', c_ulong),
('f_frsize', c_ulong),
('f_blocks', c_fsblkcnt_t),
('f_bfree', c_fsblkcnt_t),
('f_bavail', c_fsblkcnt_t),
('f_files', c_fsfilcnt_t),
('f_ffree', c_fsfilcnt_t),
('f_favail', c_fsfilcnt_t)]
if _system == 'FreeBSD':
c_fsblkcnt_t = c_uint64
c_fsfilcnt_t = c_uint64
setxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte), c_size_t, c_int)
getxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte), c_size_t)
class c_statvfs(Structure):
_fields_ = [
('f_bavail', c_fsblkcnt_t),
('f_bfree', c_fsblkcnt_t),
('f_blocks', c_fsblkcnt_t),
('f_favail', c_fsfilcnt_t),
('f_ffree', c_fsfilcnt_t),
('f_files', c_fsfilcnt_t),
('f_bsize', c_ulong),
('f_flag', c_ulong),
('f_frsize', c_ulong)]
class fuse_file_info(Structure):
_fields_ = [
('flags', c_int),
('fh_old', c_ulong),
('writepage', c_int),
('direct_io', c_uint, 1),
('keep_cache', c_uint, 1),
('flush', c_uint, 1),
('padding', c_uint, 29),
('fh', c_uint64),
('lock_owner', c_uint64)]
class fuse_context(Structure):
_fields_ = [
('fuse', c_voidp),
('uid', c_uid_t),
('gid', c_gid_t),
('pid', c_pid_t),
('private_data', c_voidp)]
class fuse_operations(Structure):
_fields_ = [
('getattr', CFUNCTYPE(c_int, c_char_p, POINTER(c_stat))),
('readlink', CFUNCTYPE(c_int, c_char_p, POINTER(c_byte), c_size_t)),
('getdir', c_voidp), # Deprecated, use readdir
('mknod', CFUNCTYPE(c_int, c_char_p, c_mode_t, c_dev_t)),
('mkdir', CFUNCTYPE(c_int, c_char_p, c_mode_t)),
('unlink', CFUNCTYPE(c_int, c_char_p)),
('rmdir', CFUNCTYPE(c_int, c_char_p)),
('symlink', CFUNCTYPE(c_int, c_char_p, c_char_p)),
('rename', CFUNCTYPE(c_int, c_char_p, c_char_p)),
('link', CFUNCTYPE(c_int, c_char_p, c_char_p)),
('chmod', CFUNCTYPE(c_int, c_char_p, c_mode_t)),
('chown', CFUNCTYPE(c_int, c_char_p, c_uid_t, c_gid_t)),
('truncate', CFUNCTYPE(c_int, c_char_p, c_off_t)),
('utime', c_voidp), # Deprecated, use utimens
('open', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info))),
('read', CFUNCTYPE(c_int, c_char_p, POINTER(c_byte), c_size_t, c_off_t,
POINTER(fuse_file_info))),
('write', CFUNCTYPE(c_int, c_char_p, POINTER(c_byte), c_size_t, c_off_t,
POINTER(fuse_file_info))),
('statfs', CFUNCTYPE(c_int, c_char_p, POINTER(c_statvfs))),
('flush', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info))),
('release', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info))),
('fsync', CFUNCTYPE(c_int, c_char_p, c_int, POINTER(fuse_file_info))),
('setxattr', setxattr_t),
('getxattr', getxattr_t),
('listxattr', CFUNCTYPE(c_int, c_char_p, POINTER(c_byte), c_size_t)),
('removexattr', CFUNCTYPE(c_int, c_char_p, c_char_p)),
('opendir', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info))),
('readdir', CFUNCTYPE(c_int, c_char_p, c_voidp, CFUNCTYPE(c_int, c_voidp,
c_char_p, POINTER(c_stat), c_off_t), c_off_t, POINTER(fuse_file_info))),
('releasedir', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info))),
('fsyncdir', CFUNCTYPE(c_int, c_char_p, c_int, POINTER(fuse_file_info))),
('init', CFUNCTYPE(c_voidp, c_voidp)),
('destroy', CFUNCTYPE(c_voidp, c_voidp)),
('access', CFUNCTYPE(c_int, c_char_p, c_int)),
('create', CFUNCTYPE(c_int, c_char_p, c_mode_t, POINTER(fuse_file_info))),
('ftruncate', CFUNCTYPE(c_int, c_char_p, c_off_t, POINTER(fuse_file_info))),
('fgetattr', CFUNCTYPE(c_int, c_char_p, POINTER(c_stat),
POINTER(fuse_file_info))),
('lock', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info), c_int, c_voidp)),
('utimens', CFUNCTYPE(c_int, c_char_p, POINTER(c_utimbuf))),
('bmap', CFUNCTYPE(c_int, c_char_p, c_size_t, POINTER(c_ulonglong)))]
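# Note (added): the field order above mirrors struct fuse_operations in the
# FUSE 2.x C headers; ctypes fills the slots positionally, so the layout must
# match the libfuse build loaded below or callbacks would be bound to the
# wrong operations.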
def time_of_timespec(ts):
return ts.tv_sec + ts.tv_nsec / 10 ** 9
def set_st_attrs(st, attrs):
for key, val in attrs.items():
if key in ('st_atime', 'st_mtime', 'st_ctime'):
timespec = getattr(st, key + 'spec')
timespec.tv_sec = int(val)
timespec.tv_nsec = int((val - timespec.tv_sec) * 10 ** 9)
elif hasattr(st, key):
setattr(st, key, val)
_libfuse_path = find_library('fuse')
if not _libfuse_path:
raise EnvironmentError('Unable to find libfuse')
_libfuse = CDLL(_libfuse_path)
_libfuse.fuse_get_context.restype = POINTER(fuse_context)
def fuse_get_context():
"""Returns a (uid, gid, pid) tuple"""
ctxp = _libfuse.fuse_get_context()
ctx = ctxp.contents
return ctx.uid, ctx.gid, ctx.pid
class FuseOSError(OSError):
def __init__(self, errno):
super(FuseOSError, self).__init__(errno, strerror(errno))
class FUSE(object):
"""This class is the lower level interface and should not be subclassed
under normal use. Its methods are called by fuse.
Assumes API version 2.6 or later."""
def __init__(self, operations, mountpoint, raw_fi=False, **kwargs):
"""Setting raw_fi to True will cause FUSE to pass the fuse_file_info
class as is to Operations, instead of just the fh field.
This gives you access to direct_io, keep_cache, etc."""
self.operations = operations
self.raw_fi = raw_fi
args = ['fuse']
if kwargs.pop('foreground', False):
args.append('-f')
if kwargs.pop('debug', False):
args.append('-d')
if kwargs.pop('nothreads', False):
args.append('-s')
kwargs.setdefault('fsname', operations.__class__.__name__)
args.append('-o')
args.append(','.join(key if val == True else '%s=%s' % (key, val)
for key, val in kwargs.items()))
args.append(mountpoint)
argv = (c_char_p * len(args))(*args)
fuse_ops = fuse_operations()
for name, prototype in fuse_operations._fields_:
if prototype != c_voidp and getattr(operations, name, None):
op = partial(self._wrapper_, getattr(self, name))
setattr(fuse_ops, name, prototype(op))
err = _libfuse.fuse_main_real(len(args), argv, pointer(fuse_ops),
sizeof(fuse_ops), None)
del self.operations # Invoke the destructor
if err:
raise RuntimeError(err)
def _wrapper_(self, func, *args, **kwargs):
"""Decorator for the methods that follow"""
try:
return func(*args, **kwargs) or 0
except OSError, e:
return -(e.errno or EFAULT)
except:
print_exc()
return -EFAULT
def getattr(self, path, buf):
return self.fgetattr(path, buf, None)
def readlink(self, path, buf, bufsize):
ret = self.operations('readlink', path)
data = create_string_buffer(ret[:bufsize - 1])
memmove(buf, data, len(data))
return 0
def mknod(self, path, mode, dev):
return self.operations('mknod', path, mode, dev)
def mkdir(self, path, mode):
return self.operations('mkdir', path, mode)
def unlink(self, path):
return self.operations('unlink', path)
def rmdir(self, path):
return self.operations('rmdir', path)
def symlink(self, source, target):
return self.operations('symlink', target, source)
def rename(self, old, new):
return self.operations('rename', old, new)
def link(self, source, target):
return self.operations('link', target, source)
def chmod(self, path, mode):
return self.operations('chmod', path, mode)
def chown(self, path, uid, gid):
# Check if any of the arguments is a -1 that has overflowed
if c_uid_t(uid + 1).value == 0:
uid = -1
if c_gid_t(gid + 1).value == 0:
gid = -1
return self.operations('chown', path, uid, gid)
def truncate(self, path, length):
return self.operations('truncate', path, length)
def open(self, path, fip):
fi = fip.contents
if self.raw_fi:
return self.operations('open', path, fi)
else:
fi.fh = self.operations('open', path, fi.flags)
return 0
def read(self, path, buf, size, offset, fip):
fh = fip.contents if self.raw_fi else fip.contents.fh
ret = self.operations('read', path, size, offset, fh)
if not ret:
return 0
data = create_string_buffer(ret[:size], size)
memmove(buf, data, size)
return size
def write(self, path, buf, size, offset, fip):
data = string_at(buf, size)
fh = fip.contents if self.raw_fi else fip.contents.fh
return self.operations('write', path, data, offset, fh)
def statfs(self, path, buf):
stv = buf.contents
attrs = self.operations('statfs', path)
for key, val in attrs.items():
if hasattr(stv, key):
setattr(stv, key, val)
return 0
def flush(self, path, fip):
fh = fip.contents if self.raw_fi else fip.contents.fh
return self.operations('flush', path, fh)
def release(self, path, fip):
fh = fip.contents if self.raw_fi else fip.contents.fh
return self.operations('release', path, fh)
def fsync(self, path, datasync, fip):
fh = fip.contents if self.raw_fi else fip.contents.fh
return self.operations('fsync', path, datasync, fh)
def setxattr(self, path, name, value, size, options, *args):
data = string_at(value, size)
return self.operations('setxattr', path, name, data, options, *args)
def getxattr(self, path, name, value, size, *args):
ret = self.operations('getxattr', path, name, *args)
retsize = len(ret)
buf = create_string_buffer(ret, retsize) # Does not add trailing 0
if bool(value):
if retsize > size:
return -ERANGE
memmove(value, buf, retsize)
return retsize
def listxattr(self, path, namebuf, size):
ret = self.operations('listxattr', path)
buf = create_string_buffer('\x00'.join(ret)) if ret else ''
bufsize = len(buf)
if bool(namebuf):
if bufsize > size:
return -ERANGE
memmove(namebuf, buf, bufsize)
return bufsize
def removexattr(self, path, name):
return self.operations('removexattr', path, name)
def opendir(self, path, fip):
# Ignore raw_fi
fip.contents.fh = self.operations('opendir', path)
return 0
def readdir(self, path, buf, filler, offset, fip):
# Ignore raw_fi
for item in self.operations('readdir', path, fip.contents.fh):
if isinstance(item, str):
name, st, offset = item, None, 0
else:
name, attrs, offset = item
if attrs:
st = c_stat()
set_st_attrs(st, attrs)
else:
st = None
if filler(buf, name, st, offset) != 0:
break
return 0
def releasedir(self, path, fip):
# Ignore raw_fi
return self.operations('releasedir', path, fip.contents.fh)
def fsyncdir(self, path, datasync, fip):
# Ignore raw_fi
return self.operations('fsyncdir', path, datasync, fip.contents.fh)
def init(self, conn):
return self.operations('init', '/')
def destroy(self, private_data):
return self.operations('destroy', '/')
def access(self, path, amode):
return self.operations('access', path, amode)
def create(self, path, mode, fip):
fi = fip.contents
if self.raw_fi:
return self.operations('create', path, mode, fi)
else:
fi.fh = self.operations('create', path, mode)
return 0
def ftruncate(self, path, length, fip):
fh = fip.contents if self.raw_fi else fip.contents.fh
return self.operations('truncate', path, length, fh)
def fgetattr(self, path, buf, fip):
memset(buf, 0, sizeof(c_stat))
st = buf.contents
fh = fip and (fip.contents if self.raw_fi else fip.contents.fh)
attrs = self.operations('getattr', path, fh)
set_st_attrs(st, attrs)
return 0
def lock(self, path, fip, cmd, lock):
fh = fip.contents if self.raw_fi else fip.contents.fh
return self.operations('lock', path, fh, cmd, lock)
def utimens(self, path, buf):
if buf:
atime = time_of_timespec(buf.contents.actime)
mtime = time_of_timespec(buf.contents.modtime)
times = (atime, mtime)
else:
times = None
return self.operations('utimens', path, times)
def bmap(self, path, blocksize, idx):
return self.operations('bmap', path, blocksize, idx)
class Operations(object):
"""This class should be subclassed and passed as an argument to FUSE on
initialization. All operations should raise a FuseOSError exception
on error.
When in doubt of what an operation should do, check the FUSE header
file or the corresponding system call man page."""
def __call__(self, op, *args):
if not hasattr(self, op):
raise FuseOSError(EFAULT)
return getattr(self, op)(*args)
def access(self, path, amode):
return 0
bmap = None
def chmod(self, path, mode):
raise FuseOSError(EROFS)
def chown(self, path, uid, gid):
raise FuseOSError(EROFS)
def create(self, path, mode, fi=None):
"""When raw_fi is False (default case), fi is None and create should
return a numerical file handle.
When raw_fi is True the file handle should be set directly by create
and return 0."""
raise FuseOSError(EROFS)
def destroy(self, path):
"""Called on filesystem destruction. Path is always /"""
pass
def flush(self, path, fh):
return 0
def fsync(self, path, datasync, fh):
return 0
def fsyncdir(self, path, datasync, fh):
return 0
def getattr(self, path, fh=None):
"""Returns a dictionary with keys identical to the stat C structure
of stat(2).
st_atime, st_mtime and st_ctime should be floats.
NOTE: There is an incompatibility between Linux and Mac OS X concerning
st_nlink of directories. Mac OS X counts all files inside the directory,
while Linux counts only the subdirectories."""
if path != '/':
raise FuseOSError(ENOENT)
return dict(st_mode=(S_IFDIR | 0755), st_nlink=2)
def getxattr(self, path, name, position=0):
raise FuseOSError(ENOTSUP)
def init(self, path):
"""Called on filesystem initialization. Path is always /
Use it instead of __init__ if you start threads on initialization."""
pass
def link(self, target, source):
raise FuseOSError(EROFS)
def listxattr(self, path):
return []
lock = None
def mkdir(self, path, mode):
raise FuseOSError(EROFS)
def mknod(self, path, mode, dev):
raise FuseOSError(EROFS)
def open(self, path, flags):
"""When raw_fi is False (default case), open should return a numerical
file handle.
When raw_fi is True the signature of open becomes:
open(self, path, fi)
and the file handle should be set directly."""
return 0
def opendir(self, path):
"""Returns a numerical file handle."""
return 0
def read(self, path, size, offset, fh):
"""Returns a string containing the data requested."""
raise FuseOSError(EIO)
def readdir(self, path, fh):
"""Can return either a list of names, or a list of (name, attrs, offset)
tuples. attrs is a dict as in getattr."""
return ['.', '..']
def readlink(self, path):
raise FuseOSError(ENOENT)
def release(self, path, fh):
return 0
def releasedir(self, path, fh):
return 0
def removexattr(self, path, name):
raise FuseOSError(ENOTSUP)
def rename(self, old, new):
raise FuseOSError(EROFS)
def rmdir(self, path):
raise FuseOSError(EROFS)
def setxattr(self, path, name, value, options, position=0):
raise FuseOSError(ENOTSUP)
def statfs(self, path):
"""Returns a dictionary with keys identical to the statvfs C structure
of statvfs(3).
On Mac OS X f_bsize and f_frsize must be a power of 2 (minimum 512)."""
return {}
def symlink(self, target, source):
raise FuseOSError(EROFS)
def truncate(self, path, length, fh=None):
raise FuseOSError(EROFS)
def unlink(self, path):
raise FuseOSError(EROFS)
def utimens(self, path, times=None):
"""Times is a (atime, mtime) tuple. If None use current time."""
return 0
def write(self, path, data, offset, fh):
raise FuseOSError(EROFS)
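# A hedged usage sketch (not part of the original module): a tiny read-only
# filesystem built by subclassing Operations and mounted with FUSE. The class
# name, mountpoint and file contents below are hypothetical, and S_IFREG and
# ENOENT are assumed to be importable from the standard stat/errno modules.
#
#     from stat import S_IFDIR, S_IFREG
#     from errno import ENOENT
#
#     class HelloFS(Operations):
#         def getattr(self, path, fh=None):
#             if path == '/':
#                 return dict(st_mode=(S_IFDIR | 0755), st_nlink=2)
#             if path == '/hello':
#                 return dict(st_mode=(S_IFREG | 0444), st_nlink=1, st_size=6)
#             raise FuseOSError(ENOENT)
#         def readdir(self, path, fh):
#             return ['.', '..', 'hello']
#         def read(self, path, size, offset, fh):
#             return 'hello\n'[offset:offset + size]
#
#     if __name__ == '__main__':
#         FUSE(HelloFS(), '/mnt/hello', foreground=True)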
class LoggingMixIn:
def __call__(self, op, path, *args):
print '->', op, path, repr(args)
ret = '[Unhandled Exception]'
try:
ret = getattr(self, op)(path, *args)
return ret
except OSError, e:
ret = str(e)
raise
finally:
print '<-', op, repr(ret)
|
apache-2.0
|
icaoberg/cellorganizer-galaxy-tools
|
datatypes/dataproviders/base.py
|
1
|
12481
|
"""
Base class(es) for all DataProviders.
"""
# there's a blurry line between functionality here and functionality in datatypes module
# attempting to keep parsing to a minimum here and focus on chopping/pagination/reformat(/filtering-maybe?)
# and using as much pre-computed info/metadata from the datatypes module as possible
# also, this shouldn't be a replacement/re-implementation of the tool layer
# (which provides traceability/versioning/reproducibility)
from collections import deque
import exceptions
import logging
log = logging.getLogger( __name__ )
_TODO = """
hooks into datatypes (define providers inside datatype modules) as factories
capture tell() when provider is done
def stop( self ): self.endpoint = source.tell(); raise StopIteration()
implement __len__ sensibly where it can be (would be good to have where we're giving some progress - '100 of 300')
seems like sniffed files would have this info
unit tests
add datum entry/exit point methods: possibly decode, encode
or create a class that pipes source through - how would decode work then?
incorporate existing visualization/dataproviders
some of the sources (esp. in datasets) don't need to be re-created
YAGNI: InterleavingMultiSourceDataProvider, CombiningMultiSourceDataProvider
datasets API entry point:
kwargs should be parsed from strings 2 layers up (in the DatasetsAPI) - that's the 'proper' place for that.
but how would it know how/what to parse if it doesn't have access to the classes used in the provider?
Building a giant list by sweeping all possible dprov classes doesn't make sense
For now - I'm burying them in the class __init__s - but I don't like that
"""
# ----------------------------------------------------------------------------- base classes
class HasSettings( type ):
"""
Metaclass for data providers that allows defining and inheriting
a dictionary named 'settings'.
Useful for allowing class level access to expected variable types
passed to class `__init__` functions so they can be parsed from a query string.
"""
# yeah - this is all too acrobatic
def __new__( cls, name, base_classes, attributes ):
settings = {}
# get settings defined in base classes
for base_class in base_classes:
base_settings = getattr( base_class, 'settings', None )
if base_settings:
settings.update( base_settings )
# get settings defined in this class
new_settings = attributes.pop( 'settings', None )
if new_settings:
settings.update( new_settings )
attributes[ 'settings' ] = settings
return type.__new__( cls, name, base_classes, attributes )
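# A hedged illustration of what this metaclass provides (the Base/Child names
# below are hypothetical, not from this module): each subclass sees the union
# of its own 'settings' dict and those of its base classes.
#
#     class Base( object ):
#         __metaclass__ = HasSettings
#         settings = { 'limit': 'int' }
#
#     class Child( Base ):
#         settings = { 'offset': 'int' }
#
#     # Child.settings == { 'limit': 'int', 'offset': 'int' }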
# ----------------------------------------------------------------------------- base classes
class DataProvider( object ):
"""
Base class for all data providers. Data providers:
(a) have a source (which must be another file-like object)
(b) implement both the iterator and context manager interfaces
(c) do not allow write methods
(but otherwise implement the other file object interface methods)
"""
# a definition of expected types for keyword arguments sent to __init__
# useful for controlling how query string dictionaries can be parsed into correct types for __init__
# empty in this base class
__metaclass__ = HasSettings
settings = {}
def __init__( self, source, **kwargs ):
"""
:param source: the source that this iterator will loop over.
(Should implement the iterable interface and ideally have the
context manager interface as well)
"""
self.source = self.validate_source( source )
def validate_source( self, source ):
"""
Is this a valid source for this provider?
:raises InvalidDataProviderSource: if the source is considered invalid.
Meant to be overridden in subclasses.
"""
if not source or not hasattr( source, '__iter__' ):
# that's by no means a thorough check
raise exceptions.InvalidDataProviderSource( source )
return source
# TODO: (this might cause problems later...)
# TODO: some providers (such as chunk's seek and read) rely on this... remove
def __getattr__( self, name ):
if name == 'source':
# if we're inside this fn, source hasn't been set - provide some safety just for this attr
return None
# otherwise, try to get the attr from the source - allows us to get things like provider.encoding, etc.
if hasattr( self.source, name ):
return getattr( self.source, name )
# raise the proper error
return self.__getattribute__( name )
# write methods should not be allowed
def truncate( self, size ):
raise NotImplementedError( 'Write methods are purposely disabled' )
def write( self, string ):
raise NotImplementedError( 'Write methods are purposely disabled' )
def writelines( self, sequence ):
raise NotImplementedError( 'Write methods are purposely disabled' )
# TODO: route read methods through next?
# def readline( self ):
# return self.next()
def readlines( self ):
return [ line for line in self ]
# iterator interface
def __iter__( self ):
# it's generators all the way up, Timmy
with self:
for datum in self.source:
yield datum
def next( self ):
return self.source.next()
# context manager interface
def __enter__( self ):
# make the source's context manager interface optional
if hasattr( self.source, '__enter__' ):
self.source.__enter__()
return self
def __exit__( self, *args ):
# make the source's context manager interface optional, call on source if there
if hasattr( self.source, '__exit__' ):
self.source.__exit__( *args )
# alternately, call close()
elif hasattr( self.source, 'close' ):
self.source.close()
def __str__( self ):
"""
String representation for easier debugging.
Will call `__str__` on its source so this will display piped dataproviders.
"""
# we need to protect against recursion (in __getattr__) if self.source hasn't been set
source_str = str( self.source ) if hasattr( self, 'source' ) else ''
return '%s(%s)' % ( self.__class__.__name__, str( source_str ) )
class FilteredDataProvider( DataProvider ):
"""
Passes each datum through a filter function and yields it if that function
returns a non-`None` value.
Also maintains counters:
- `num_data_read`: how many data have been consumed from the source.
- `num_valid_data_read`: how many data have been returned from `filter`.
- `num_data_returned`: how many data have been yielded by this provider.
"""
# not useful here - we don't want functions over the query string
# settings.update({ 'filter_fn': 'function' })
def __init__( self, source, filter_fn=None, **kwargs ):
"""
:param filter_fn: a lambda or function that will be passed a datum and
return either the (optionally modified) datum or None.
"""
super( FilteredDataProvider, self ).__init__( source, **kwargs )
self.filter_fn = filter_fn if hasattr( filter_fn, '__call__' ) else None
# count how many data we got from the source
self.num_data_read = 0
# how many valid data have we gotten from the source
# IOW, data that have passed the filter and been either provided OR skipped due to offset
self.num_valid_data_read = 0
# how many lines have been provided/output
self.num_data_returned = 0
def __iter__( self ):
parent_gen = super( FilteredDataProvider, self ).__iter__()
for datum in parent_gen:
self.num_data_read += 1
datum = self.filter( datum )
if datum is not None:
self.num_valid_data_read += 1
self.num_data_returned += 1
yield datum
# TODO: may want to squash this into DataProvider
def filter( self, datum ):
"""
When given a datum from the provider's source, return None if the datum
'does not pass' the filter or is invalid. Return the datum if it's valid.
:param datum: the datum to check for validity.
:returns: the datum, a modified datum, or None
Meant to be overridden.
"""
if self.filter_fn:
return self.filter_fn( datum )
# also can be overridden entirely
return datum
class LimitedOffsetDataProvider( FilteredDataProvider ):
"""
A provider that uses the counters from FilteredDataProvider to limit the
number of data and/or skip `offset` number of data before providing.
Useful for grabbing sections from a source (e.g. pagination).
"""
# define the expected types of these __init__ arguments so they can be parsed out from query strings
settings = {
'limit' : 'int',
'offset': 'int'
}
# TODO: may want to squash this into DataProvider
def __init__( self, source, offset=0, limit=None, **kwargs ):
"""
:param offset: the number of data to skip before providing.
:param limit: the final number of data to provide.
"""
super( LimitedOffsetDataProvider, self ).__init__( source, **kwargs )
# how many valid data to skip before we start outputting data - must be positive
# (diff to support neg. indices - must be pos.)
self.offset = max( offset, 0 )
# how many valid data to return - must be positive (None indicates no limit)
self.limit = limit
if self.limit is not None:
self.limit = max( self.limit, 0 )
def __iter__( self ):
"""
Iterate over the source until `num_valid_data_read` is greater than
`offset`, begin providing data, and stop when `num_data_returned`
reaches `limit`.
"""
if self.limit is not None and self.limit <= 0:
return
yield  # unreachable, but makes this method a generator even when it returns early
parent_gen = super( LimitedOffsetDataProvider, self ).__iter__()
for datum in parent_gen:
self.num_data_returned -= 1
# print 'self.num_data_returned:', self.num_data_returned
# print 'self.num_valid_data_read:', self.num_valid_data_read
if self.num_valid_data_read > self.offset:
self.num_data_returned += 1
yield datum
if self.limit is not None and self.num_data_returned >= self.limit:
break
# TODO: skipping lines is inefficient - somehow cache file position/line_num pair and allow provider
# to seek to a pos/line and then begin providing lines
# the important catch here is that we need to have accurate pos/line pairs
# in order to preserve the functionality of limit and offset
# if file_seek and len( file_seek ) == 2:
# seek_pos, new_line_num = file_seek
# self.seek_and_set_curr_line( seek_pos, new_line_num )
# def seek_and_set_curr_line( self, file_seek, new_curr_line_num ):
# self.seek( file_seek, os.SEEK_SET )
# self.curr_line_num = new_curr_line_num
class MultiSourceDataProvider( DataProvider ):
"""
A provider that iterates over a list of given sources and provides data
from one after another.
An iterator over iterators.
"""
def __init__( self, source_list, **kwargs ):
"""
:param source_list: an iterator of iterables
"""
self.source_list = deque( source_list )
def __iter__( self ):
"""
Iterate over the source_list, then iterate over the data in each source.
Skip a given source in `source_list` if it is `None` or invalid.
"""
for source in self.source_list:
# just skip falsy sources
if not source:
continue
try:
self.source = self.validate_source( source )
except exceptions.InvalidDataProviderSource:
continue
parent_gen = super( MultiSourceDataProvider, self ).__iter__()
for datum in parent_gen:
yield datum
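# A hedged usage sketch (not from the original source): providers wrap an
# iterable source and can be piped into one another. The file name and the
# filter lambda below are hypothetical.
#
#     source = open( 'some_file.txt' )
#     filtered = FilteredDataProvider( source,
#         filter_fn=lambda line: line if line.strip() else None )
#     page = LimitedOffsetDataProvider( filtered, offset=100, limit=50 )
#     for datum in page:
#         pass # yields at most 50 non-blank lines, skipping the first 100 valid ones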
|
gpl-3.0
|
mcalhoun/ansible
|
lib/ansible/compat/__init__.py
|
332
|
1088
|
# (c) 2014, Toshio Kuratomi <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
'''
Compat library for ansible. This contains compatibility definitions for older python
When we need to import a module differently depending on python version, do it
here. Then in the code we can simply import from compat in order to get what we want.
'''
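# A hypothetical sketch of the kind of definition that belongs here (not part
# of the original file): choosing a module by interpreter version so callers
# can import it from ansible.compat without caring which Python is running.
#
#     try:
#         import configparser                    # Python 3
#     except ImportError:
#         import ConfigParser as configparser    # Python 2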
|
gpl-3.0
|
rockyzhang/zhangyanhit-python-for-android-mips
|
python3-alpha/python3-src/Lib/test/test_math.py
|
55
|
46049
|
# Python test set -- math module
# XXXX Should not do tests around zero only
from test.support import run_unittest, verbose, requires_IEEE_754
import unittest
import math
import os
import sys
import random
import struct
import sysconfig
eps = 1E-05
NAN = float('nan')
INF = float('inf')
NINF = float('-inf')
# detect evidence of double-rounding: fsum is not always correctly
# rounded on machines that suffer from double rounding.
x, y = 1e16, 2.9999 # use temporary values to defeat peephole optimizer
HAVE_DOUBLE_ROUNDING = (x + y == 1e16 + 4)
# locate file with test values
if __name__ == '__main__':
file = sys.argv[0]
else:
file = __file__
test_dir = os.path.dirname(file) or os.curdir
math_testcases = os.path.join(test_dir, 'math_testcases.txt')
test_file = os.path.join(test_dir, 'cmath_testcases.txt')
def to_ulps(x):
"""Convert a non-NaN float x to an integer, in such a way that
adjacent floats are converted to adjacent integers. Then
abs(ulps(x) - ulps(y)) gives the difference in ulps between two
floats.
The results from this function will only make sense on platforms
where C doubles are represented in IEEE 754 binary64 format.
"""
n = struct.unpack('<q', struct.pack('<d', x))[0]
if n < 0:
n = ~(n+2**63)
return n
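# For instance (illustration added here, not in the original test): to_ulps(1.0)
# and to_ulps(1.0 + 2**-52) differ by exactly 1, because 1.0 + 2**-52 is the
# next representable binary64 float above 1.0.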
def ulps_check(expected, got, ulps=20):
"""Given non-NaN floats `expected` and `got`,
check that they're equal to within the given number of ulps.
Returns None on success and an error message on failure."""
ulps_error = to_ulps(got) - to_ulps(expected)
if abs(ulps_error) <= ulps:
return None
return "error = {} ulps; permitted error = {} ulps".format(ulps_error,
ulps)
# Here's a pure Python version of the math.factorial algorithm, for
# documentation and comparison purposes.
#
# Formula:
#
# factorial(n) = factorial_odd_part(n) << (n - count_set_bits(n))
#
# where
#
# factorial_odd_part(n) = product_{i >= 0} product_{0 < j <= n >> i; j odd} j
#
# The outer product above is an infinite product, but once i >= n.bit_length,
# (n >> i) < 1 and the corresponding term of the product is empty. So only the
# finitely many terms for 0 <= i < n.bit_length() contribute anything.
#
# We iterate downwards from i == n.bit_length() - 1 to i == 0. The inner
# product in the formula above starts at 1 for i == n.bit_length(); for each i
# < n.bit_length() we get the inner product for i from that for i + 1 by
# multiplying by all j in {n >> i+1 < j <= n >> i; j odd}. In Python terms,
# this set is range((n >> i+1) + 1 | 1, (n >> i) + 1 | 1, 2).
def count_set_bits(n):
"""Number of '1' bits in binary expansion of a nonnnegative integer."""
return 1 + count_set_bits(n & n - 1) if n else 0
def partial_product(start, stop):
"""Product of integers in range(start, stop, 2), computed recursively.
start and stop should both be odd, with start <= stop.
"""
numfactors = (stop - start) >> 1
if not numfactors:
return 1
elif numfactors == 1:
return start
else:
mid = (start + numfactors) | 1
return partial_product(start, mid) * partial_product(mid, stop)
def py_factorial(n):
"""Factorial of nonnegative integer n, via "Binary Split Factorial Formula"
described at http://www.luschny.de/math/factorial/binarysplitfact.html
"""
inner = outer = 1
for i in reversed(range(n.bit_length())):
inner *= partial_product((n >> i + 1) + 1 | 1, (n >> i) + 1 | 1)
outer *= inner
return outer << (n - count_set_bits(n))
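# Worked example of the formula above (added for illustration): for n = 10,
# count_set_bits(10) == 2 (0b1010 has two set bits), the odd part of 10! is
# 14175, and py_factorial(10) == 14175 << (10 - 2) == 3628800 == math.factorial(10).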
def acc_check(expected, got, rel_err=2e-15, abs_err = 5e-323):
"""Determine whether non-NaN floats a and b are equal to within a
(small) rounding error. The default values for rel_err and
abs_err are chosen to be suitable for platforms where a float is
represented by an IEEE 754 double. They allow an error of between
9 and 19 ulps."""
# need to special case infinities, since inf - inf gives nan
if math.isinf(expected) and got == expected:
return None
error = got - expected
permitted_error = max(abs_err, rel_err * abs(expected))
if abs(error) < permitted_error:
return None
return "error = {}; permitted error = {}".format(error,
permitted_error)
def parse_mtestfile(fname):
"""Parse a file with test values
-- starts a comment
blank lines, or lines containing only a comment, are ignored
other lines are expected to have the form
id fn arg -> expected [flag]*
"""
with open(fname) as fp:
for line in fp:
# strip comments, and skip blank lines
if '--' in line:
line = line[:line.index('--')]
if not line.strip():
continue
lhs, rhs = line.split('->')
id, fn, arg = lhs.split()
rhs_pieces = rhs.split()
exp = rhs_pieces[0]
flags = rhs_pieces[1:]
yield (id, fn, float(arg), float(exp), flags)
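# A concrete (hypothetical) line in that format would look like:
#   erf0001 erf 0.0 -> 0.0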
def parse_testfile(fname):
"""Parse a file with test values
Empty lines or lines starting with -- are ignored
yields id, fn, arg_real, arg_imag, exp_real, exp_imag
"""
with open(fname) as fp:
for line in fp:
# skip comment lines and blank lines
if line.startswith('--') or not line.strip():
continue
lhs, rhs = line.split('->')
id, fn, arg_real, arg_imag = lhs.split()
rhs_pieces = rhs.split()
exp_real, exp_imag = rhs_pieces[0], rhs_pieces[1]
flags = rhs_pieces[2:]
yield (id, fn,
float(arg_real), float(arg_imag),
float(exp_real), float(exp_imag),
flags
)
class MathTests(unittest.TestCase):
def ftest(self, name, value, expected):
if abs(value-expected) > eps:
# Use %r instead of %f so the error message
# displays full precision. Otherwise discrepancies
# in the last few bits will lead to very confusing
# error messages
self.fail('%s returned %r, expected %r' %
(name, value, expected))
def testConstants(self):
self.ftest('pi', math.pi, 3.1415926)
self.ftest('e', math.e, 2.7182818)
def testAcos(self):
self.assertRaises(TypeError, math.acos)
self.ftest('acos(-1)', math.acos(-1), math.pi)
self.ftest('acos(0)', math.acos(0), math.pi/2)
self.ftest('acos(1)', math.acos(1), 0)
self.assertRaises(ValueError, math.acos, INF)
self.assertRaises(ValueError, math.acos, NINF)
self.assertTrue(math.isnan(math.acos(NAN)))
def testAcosh(self):
self.assertRaises(TypeError, math.acosh)
self.ftest('acosh(1)', math.acosh(1), 0)
self.ftest('acosh(2)', math.acosh(2), 1.3169578969248168)
self.assertRaises(ValueError, math.acosh, 0)
self.assertRaises(ValueError, math.acosh, -1)
self.assertEqual(math.acosh(INF), INF)
self.assertRaises(ValueError, math.acosh, NINF)
self.assertTrue(math.isnan(math.acosh(NAN)))
def testAsin(self):
self.assertRaises(TypeError, math.asin)
self.ftest('asin(-1)', math.asin(-1), -math.pi/2)
self.ftest('asin(0)', math.asin(0), 0)
self.ftest('asin(1)', math.asin(1), math.pi/2)
self.assertRaises(ValueError, math.asin, INF)
self.assertRaises(ValueError, math.asin, NINF)
self.assertTrue(math.isnan(math.asin(NAN)))
def testAsinh(self):
self.assertRaises(TypeError, math.asinh)
self.ftest('asinh(0)', math.asinh(0), 0)
self.ftest('asinh(1)', math.asinh(1), 0.88137358701954305)
self.ftest('asinh(-1)', math.asinh(-1), -0.88137358701954305)
self.assertEqual(math.asinh(INF), INF)
self.assertEqual(math.asinh(NINF), NINF)
self.assertTrue(math.isnan(math.asinh(NAN)))
def testAtan(self):
self.assertRaises(TypeError, math.atan)
self.ftest('atan(-1)', math.atan(-1), -math.pi/4)
self.ftest('atan(0)', math.atan(0), 0)
self.ftest('atan(1)', math.atan(1), math.pi/4)
self.ftest('atan(inf)', math.atan(INF), math.pi/2)
self.ftest('atan(-inf)', math.atan(NINF), -math.pi/2)
self.assertTrue(math.isnan(math.atan(NAN)))
def testAtanh(self):
self.assertRaises(TypeError, math.atan)
self.ftest('atanh(0)', math.atanh(0), 0)
self.ftest('atanh(0.5)', math.atanh(0.5), 0.54930614433405489)
self.ftest('atanh(-0.5)', math.atanh(-0.5), -0.54930614433405489)
self.assertRaises(ValueError, math.atanh, 1)
self.assertRaises(ValueError, math.atanh, -1)
self.assertRaises(ValueError, math.atanh, INF)
self.assertRaises(ValueError, math.atanh, NINF)
self.assertTrue(math.isnan(math.atanh(NAN)))
def testAtan2(self):
self.assertRaises(TypeError, math.atan2)
self.ftest('atan2(-1, 0)', math.atan2(-1, 0), -math.pi/2)
self.ftest('atan2(-1, 1)', math.atan2(-1, 1), -math.pi/4)
self.ftest('atan2(0, 1)', math.atan2(0, 1), 0)
self.ftest('atan2(1, 1)', math.atan2(1, 1), math.pi/4)
self.ftest('atan2(1, 0)', math.atan2(1, 0), math.pi/2)
# math.atan2(0, x)
self.ftest('atan2(0., -inf)', math.atan2(0., NINF), math.pi)
self.ftest('atan2(0., -2.3)', math.atan2(0., -2.3), math.pi)
self.ftest('atan2(0., -0.)', math.atan2(0., -0.), math.pi)
self.assertEqual(math.atan2(0., 0.), 0.)
self.assertEqual(math.atan2(0., 2.3), 0.)
self.assertEqual(math.atan2(0., INF), 0.)
self.assertTrue(math.isnan(math.atan2(0., NAN)))
# math.atan2(-0, x)
self.ftest('atan2(-0., -inf)', math.atan2(-0., NINF), -math.pi)
self.ftest('atan2(-0., -2.3)', math.atan2(-0., -2.3), -math.pi)
self.ftest('atan2(-0., -0.)', math.atan2(-0., -0.), -math.pi)
self.assertEqual(math.atan2(-0., 0.), -0.)
self.assertEqual(math.atan2(-0., 2.3), -0.)
self.assertEqual(math.atan2(-0., INF), -0.)
self.assertTrue(math.isnan(math.atan2(-0., NAN)))
# math.atan2(INF, x)
self.ftest('atan2(inf, -inf)', math.atan2(INF, NINF), math.pi*3/4)
self.ftest('atan2(inf, -2.3)', math.atan2(INF, -2.3), math.pi/2)
self.ftest('atan2(inf, -0.)', math.atan2(INF, -0.0), math.pi/2)
self.ftest('atan2(inf, 0.)', math.atan2(INF, 0.0), math.pi/2)
self.ftest('atan2(inf, 2.3)', math.atan2(INF, 2.3), math.pi/2)
self.ftest('atan2(inf, inf)', math.atan2(INF, INF), math.pi/4)
self.assertTrue(math.isnan(math.atan2(INF, NAN)))
# math.atan2(NINF, x)
self.ftest('atan2(-inf, -inf)', math.atan2(NINF, NINF), -math.pi*3/4)
self.ftest('atan2(-inf, -2.3)', math.atan2(NINF, -2.3), -math.pi/2)
self.ftest('atan2(-inf, -0.)', math.atan2(NINF, -0.0), -math.pi/2)
self.ftest('atan2(-inf, 0.)', math.atan2(NINF, 0.0), -math.pi/2)
self.ftest('atan2(-inf, 2.3)', math.atan2(NINF, 2.3), -math.pi/2)
self.ftest('atan2(-inf, inf)', math.atan2(NINF, INF), -math.pi/4)
self.assertTrue(math.isnan(math.atan2(NINF, NAN)))
# math.atan2(+finite, x)
self.ftest('atan2(2.3, -inf)', math.atan2(2.3, NINF), math.pi)
self.ftest('atan2(2.3, -0.)', math.atan2(2.3, -0.), math.pi/2)
self.ftest('atan2(2.3, 0.)', math.atan2(2.3, 0.), math.pi/2)
self.assertEqual(math.atan2(2.3, INF), 0.)
self.assertTrue(math.isnan(math.atan2(2.3, NAN)))
# math.atan2(-finite, x)
self.ftest('atan2(-2.3, -inf)', math.atan2(-2.3, NINF), -math.pi)
self.ftest('atan2(-2.3, -0.)', math.atan2(-2.3, -0.), -math.pi/2)
self.ftest('atan2(-2.3, 0.)', math.atan2(-2.3, 0.), -math.pi/2)
self.assertEqual(math.atan2(-2.3, INF), -0.)
self.assertTrue(math.isnan(math.atan2(-2.3, NAN)))
# math.atan2(NAN, x)
self.assertTrue(math.isnan(math.atan2(NAN, NINF)))
self.assertTrue(math.isnan(math.atan2(NAN, -2.3)))
self.assertTrue(math.isnan(math.atan2(NAN, -0.)))
self.assertTrue(math.isnan(math.atan2(NAN, 0.)))
self.assertTrue(math.isnan(math.atan2(NAN, 2.3)))
self.assertTrue(math.isnan(math.atan2(NAN, INF)))
self.assertTrue(math.isnan(math.atan2(NAN, NAN)))
def testCeil(self):
self.assertRaises(TypeError, math.ceil)
self.assertEqual(int, type(math.ceil(0.5)))
self.ftest('ceil(0.5)', math.ceil(0.5), 1)
self.ftest('ceil(1.0)', math.ceil(1.0), 1)
self.ftest('ceil(1.5)', math.ceil(1.5), 2)
self.ftest('ceil(-0.5)', math.ceil(-0.5), 0)
self.ftest('ceil(-1.0)', math.ceil(-1.0), -1)
self.ftest('ceil(-1.5)', math.ceil(-1.5), -1)
#self.assertEqual(math.ceil(INF), INF)
#self.assertEqual(math.ceil(NINF), NINF)
#self.assertTrue(math.isnan(math.ceil(NAN)))
class TestCeil:
def __ceil__(self):
return 42
class TestNoCeil:
pass
self.ftest('ceil(TestCeil())', math.ceil(TestCeil()), 42)
self.assertRaises(TypeError, math.ceil, TestNoCeil())
t = TestNoCeil()
t.__ceil__ = lambda *args: args
self.assertRaises(TypeError, math.ceil, t)
self.assertRaises(TypeError, math.ceil, t, 0)
@requires_IEEE_754
def testCopysign(self):
self.assertEqual(math.copysign(1, 42), 1.0)
self.assertEqual(math.copysign(0., 42), 0.0)
self.assertEqual(math.copysign(1., -42), -1.0)
self.assertEqual(math.copysign(3, 0.), 3.0)
self.assertEqual(math.copysign(4., -0.), -4.0)
self.assertRaises(TypeError, math.copysign)
# copysign should let us distinguish signs of zeros
self.assertEqual(math.copysign(1., 0.), 1.)
self.assertEqual(math.copysign(1., -0.), -1.)
self.assertEqual(math.copysign(INF, 0.), INF)
self.assertEqual(math.copysign(INF, -0.), NINF)
self.assertEqual(math.copysign(NINF, 0.), INF)
self.assertEqual(math.copysign(NINF, -0.), NINF)
# and of infinities
self.assertEqual(math.copysign(1., INF), 1.)
self.assertEqual(math.copysign(1., NINF), -1.)
self.assertEqual(math.copysign(INF, INF), INF)
self.assertEqual(math.copysign(INF, NINF), NINF)
self.assertEqual(math.copysign(NINF, INF), INF)
self.assertEqual(math.copysign(NINF, NINF), NINF)
self.assertTrue(math.isnan(math.copysign(NAN, 1.)))
self.assertTrue(math.isnan(math.copysign(NAN, INF)))
self.assertTrue(math.isnan(math.copysign(NAN, NINF)))
self.assertTrue(math.isnan(math.copysign(NAN, NAN)))
# copysign(INF, NAN) may be INF or it may be NINF, since
# we don't know whether the sign bit of NAN is set on any
# given platform.
self.assertTrue(math.isinf(math.copysign(INF, NAN)))
# similarly, copysign(2., NAN) could be 2. or -2.
self.assertEqual(abs(math.copysign(2., NAN)), 2.)
def testCos(self):
self.assertRaises(TypeError, math.cos)
self.ftest('cos(-pi/2)', math.cos(-math.pi/2), 0)
self.ftest('cos(0)', math.cos(0), 1)
self.ftest('cos(pi/2)', math.cos(math.pi/2), 0)
self.ftest('cos(pi)', math.cos(math.pi), -1)
try:
self.assertTrue(math.isnan(math.cos(INF)))
self.assertTrue(math.isnan(math.cos(NINF)))
except ValueError:
self.assertRaises(ValueError, math.cos, INF)
self.assertRaises(ValueError, math.cos, NINF)
self.assertTrue(math.isnan(math.cos(NAN)))
def testCosh(self):
self.assertRaises(TypeError, math.cosh)
self.ftest('cosh(0)', math.cosh(0), 1)
self.ftest('cosh(2)-2*cosh(1)**2', math.cosh(2)-2*math.cosh(1)**2, -1) # Thanks to Lambert
self.assertEqual(math.cosh(INF), INF)
self.assertEqual(math.cosh(NINF), INF)
self.assertTrue(math.isnan(math.cosh(NAN)))
def testDegrees(self):
self.assertRaises(TypeError, math.degrees)
self.ftest('degrees(pi)', math.degrees(math.pi), 180.0)
self.ftest('degrees(pi/2)', math.degrees(math.pi/2), 90.0)
self.ftest('degrees(-pi/4)', math.degrees(-math.pi/4), -45.0)
def testExp(self):
self.assertRaises(TypeError, math.exp)
self.ftest('exp(-1)', math.exp(-1), 1/math.e)
self.ftest('exp(0)', math.exp(0), 1)
self.ftest('exp(1)', math.exp(1), math.e)
self.assertEqual(math.exp(INF), INF)
self.assertEqual(math.exp(NINF), 0.)
self.assertTrue(math.isnan(math.exp(NAN)))
def testFabs(self):
self.assertRaises(TypeError, math.fabs)
self.ftest('fabs(-1)', math.fabs(-1), 1)
self.ftest('fabs(0)', math.fabs(0), 0)
self.ftest('fabs(1)', math.fabs(1), 1)
def testFactorial(self):
self.assertEqual(math.factorial(0), 1)
self.assertEqual(math.factorial(0.0), 1)
total = 1
for i in range(1, 1000):
total *= i
self.assertEqual(math.factorial(i), total)
self.assertEqual(math.factorial(float(i)), total)
self.assertEqual(math.factorial(i), py_factorial(i))
self.assertRaises(ValueError, math.factorial, -1)
self.assertRaises(ValueError, math.factorial, -1.0)
self.assertRaises(ValueError, math.factorial, math.pi)
self.assertRaises(OverflowError, math.factorial, sys.maxsize+1)
self.assertRaises(OverflowError, math.factorial, 10e100)
def testFloor(self):
self.assertRaises(TypeError, math.floor)
self.assertEqual(int, type(math.floor(0.5)))
self.ftest('floor(0.5)', math.floor(0.5), 0)
self.ftest('floor(1.0)', math.floor(1.0), 1)
self.ftest('floor(1.5)', math.floor(1.5), 1)
self.ftest('floor(-0.5)', math.floor(-0.5), -1)
self.ftest('floor(-1.0)', math.floor(-1.0), -1)
self.ftest('floor(-1.5)', math.floor(-1.5), -2)
# pow() relies on floor() to check for integers
# This fails on some platforms - so check it here
self.ftest('floor(1.23e167)', math.floor(1.23e167), 1.23e167)
self.ftest('floor(-1.23e167)', math.floor(-1.23e167), -1.23e167)
#self.assertEqual(math.ceil(INF), INF)
#self.assertEqual(math.ceil(NINF), NINF)
#self.assertTrue(math.isnan(math.floor(NAN)))
class TestFloor:
def __floor__(self):
return 42
class TestNoFloor:
pass
self.ftest('floor(TestFloor())', math.floor(TestFloor()), 42)
self.assertRaises(TypeError, math.floor, TestNoFloor())
t = TestNoFloor()
t.__floor__ = lambda *args: args
self.assertRaises(TypeError, math.floor, t)
self.assertRaises(TypeError, math.floor, t, 0)
def testFmod(self):
self.assertRaises(TypeError, math.fmod)
self.ftest('fmod(10,1)', math.fmod(10,1), 0)
self.ftest('fmod(10,0.5)', math.fmod(10,0.5), 0)
self.ftest('fmod(10,1.5)', math.fmod(10,1.5), 1)
self.ftest('fmod(-10,1)', math.fmod(-10,1), 0)
self.ftest('fmod(-10,0.5)', math.fmod(-10,0.5), 0)
self.ftest('fmod(-10,1.5)', math.fmod(-10,1.5), -1)
self.assertTrue(math.isnan(math.fmod(NAN, 1.)))
self.assertTrue(math.isnan(math.fmod(1., NAN)))
self.assertTrue(math.isnan(math.fmod(NAN, NAN)))
self.assertRaises(ValueError, math.fmod, 1., 0.)
self.assertRaises(ValueError, math.fmod, INF, 1.)
self.assertRaises(ValueError, math.fmod, NINF, 1.)
self.assertRaises(ValueError, math.fmod, INF, 0.)
self.assertEqual(math.fmod(3.0, INF), 3.0)
self.assertEqual(math.fmod(-3.0, INF), -3.0)
self.assertEqual(math.fmod(3.0, NINF), 3.0)
self.assertEqual(math.fmod(-3.0, NINF), -3.0)
self.assertEqual(math.fmod(0.0, 3.0), 0.0)
self.assertEqual(math.fmod(0.0, NINF), 0.0)
def testFrexp(self):
self.assertRaises(TypeError, math.frexp)
def testfrexp(name, result, expected):
(mant, exp), (emant, eexp) = result, expected
if abs(mant-emant) > eps or exp != eexp:
self.fail('%s returned %r, expected %r'%\
(name, result, expected))
testfrexp('frexp(-1)', math.frexp(-1), (-0.5, 1))
testfrexp('frexp(0)', math.frexp(0), (0, 0))
testfrexp('frexp(1)', math.frexp(1), (0.5, 1))
testfrexp('frexp(2)', math.frexp(2), (0.5, 2))
self.assertEqual(math.frexp(INF)[0], INF)
self.assertEqual(math.frexp(NINF)[0], NINF)
self.assertTrue(math.isnan(math.frexp(NAN)[0]))
@requires_IEEE_754
@unittest.skipIf(HAVE_DOUBLE_ROUNDING,
"fsum is not exact on machines with double rounding")
def testFsum(self):
# math.fsum relies on exact rounding for correct operation.
# There's a known problem with IA32 floating-point that causes
# inexact rounding in some situations, and will cause the
# math.fsum tests below to fail; see issue #2937. On non IEEE
# 754 platforms, and on IEEE 754 platforms that exhibit the
# problem described in issue #2937, we simply skip the whole
# test.
# Python version of math.fsum, for comparison. Uses a
# different algorithm based on frexp, ldexp and integer
# arithmetic.
from sys import float_info
mant_dig = float_info.mant_dig
etiny = float_info.min_exp - mant_dig
def msum(iterable):
"""Full precision summation. Compute sum(iterable) without any
intermediate accumulation of error. Based on the 'lsum' function
at http://code.activestate.com/recipes/393090/
"""
tmant, texp = 0, 0
for x in iterable:
mant, exp = math.frexp(x)
mant, exp = int(math.ldexp(mant, mant_dig)), exp - mant_dig
if texp > exp:
tmant <<= texp-exp
texp = exp
else:
mant <<= exp-texp
tmant += mant
# Round tmant * 2**texp to a float. The original recipe
# used float(str(tmant)) * 2.0**texp for this, but that's
# a little unsafe because str -> float conversion can't be
# relied upon to do correct rounding on all platforms.
tail = max(len(bin(abs(tmant)))-2 - mant_dig, etiny - texp)
if tail > 0:
h = 1 << (tail-1)
tmant = tmant // (2*h) + bool(tmant & h and tmant & 3*h-1)
texp += tail
return math.ldexp(tmant, texp)
test_values = [
([], 0.0),
([0.0], 0.0),
([1e100, 1.0, -1e100, 1e-100, 1e50, -1.0, -1e50], 1e-100),
([2.0**53, -0.5, -2.0**-54], 2.0**53-1.0),
([2.0**53, 1.0, 2.0**-100], 2.0**53+2.0),
([2.0**53+10.0, 1.0, 2.0**-100], 2.0**53+12.0),
([2.0**53-4.0, 0.5, 2.0**-54], 2.0**53-3.0),
([1./n for n in range(1, 1001)],
float.fromhex('0x1.df11f45f4e61ap+2')),
([(-1.)**n/n for n in range(1, 1001)],
float.fromhex('-0x1.62a2af1bd3624p-1')),
([1.7**(i+1)-1.7**i for i in range(1000)] + [-1.7**1000], -1.0),
([1e16, 1., 1e-16], 10000000000000002.0),
([1e16-2., 1.-2.**-53, -(1e16-2.), -(1.-2.**-53)], 0.0),
# exercise code for resizing partials array
([2.**n - 2.**(n+50) + 2.**(n+52) for n in range(-1074, 972, 2)] +
[-2.**1022],
float.fromhex('0x1.5555555555555p+970')),
]
for i, (vals, expected) in enumerate(test_values):
try:
actual = math.fsum(vals)
except OverflowError:
self.fail("test %d failed: got OverflowError, expected %r "
"for math.fsum(%.100r)" % (i, expected, vals))
except ValueError:
self.fail("test %d failed: got ValueError, expected %r "
"for math.fsum(%.100r)" % (i, expected, vals))
self.assertEqual(actual, expected)
from random import random, gauss, shuffle
for j in range(1000):
vals = [7, 1e100, -7, -1e100, -9e-20, 8e-20] * 10
s = 0
for i in range(200):
v = gauss(0, random()) ** 7 - s
s += v
vals.append(v)
shuffle(vals)
s = msum(vals)
self.assertEqual(msum(vals), math.fsum(vals))
def testHypot(self):
self.assertRaises(TypeError, math.hypot)
self.ftest('hypot(0,0)', math.hypot(0,0), 0)
self.ftest('hypot(3,4)', math.hypot(3,4), 5)
self.assertEqual(math.hypot(NAN, INF), INF)
self.assertEqual(math.hypot(INF, NAN), INF)
self.assertEqual(math.hypot(NAN, NINF), INF)
self.assertEqual(math.hypot(NINF, NAN), INF)
self.assertTrue(math.isnan(math.hypot(1.0, NAN)))
self.assertTrue(math.isnan(math.hypot(NAN, -2.0)))
def testLdexp(self):
self.assertRaises(TypeError, math.ldexp)
self.ftest('ldexp(0,1)', math.ldexp(0,1), 0)
self.ftest('ldexp(1,1)', math.ldexp(1,1), 2)
self.ftest('ldexp(1,-1)', math.ldexp(1,-1), 0.5)
self.ftest('ldexp(-1,1)', math.ldexp(-1,1), -2)
self.assertRaises(OverflowError, math.ldexp, 1., 1000000)
self.assertRaises(OverflowError, math.ldexp, -1., 1000000)
self.assertEqual(math.ldexp(1., -1000000), 0.)
self.assertEqual(math.ldexp(-1., -1000000), -0.)
self.assertEqual(math.ldexp(INF, 30), INF)
self.assertEqual(math.ldexp(NINF, -213), NINF)
self.assertTrue(math.isnan(math.ldexp(NAN, 0)))
# large second argument
for n in [10**5, 10**10, 10**20, 10**40]:
self.assertEqual(math.ldexp(INF, -n), INF)
self.assertEqual(math.ldexp(NINF, -n), NINF)
self.assertEqual(math.ldexp(1., -n), 0.)
self.assertEqual(math.ldexp(-1., -n), -0.)
self.assertEqual(math.ldexp(0., -n), 0.)
self.assertEqual(math.ldexp(-0., -n), -0.)
self.assertTrue(math.isnan(math.ldexp(NAN, -n)))
self.assertRaises(OverflowError, math.ldexp, 1., n)
self.assertRaises(OverflowError, math.ldexp, -1., n)
self.assertEqual(math.ldexp(0., n), 0.)
self.assertEqual(math.ldexp(-0., n), -0.)
self.assertEqual(math.ldexp(INF, n), INF)
self.assertEqual(math.ldexp(NINF, n), NINF)
self.assertTrue(math.isnan(math.ldexp(NAN, n)))
def testLog(self):
self.assertRaises(TypeError, math.log)
self.ftest('log(1/e)', math.log(1/math.e), -1)
self.ftest('log(1)', math.log(1), 0)
self.ftest('log(e)', math.log(math.e), 1)
self.ftest('log(32,2)', math.log(32,2), 5)
self.ftest('log(10**40, 10)', math.log(10**40, 10), 40)
self.ftest('log(10**40, 10**20)', math.log(10**40, 10**20), 2)
self.ftest('log(10**1000)', math.log(10**1000),
2302.5850929940457)
self.assertRaises(ValueError, math.log, -1.5)
self.assertRaises(ValueError, math.log, -10**1000)
self.assertRaises(ValueError, math.log, NINF)
self.assertEqual(math.log(INF), INF)
self.assertTrue(math.isnan(math.log(NAN)))
def testLog1p(self):
self.assertRaises(TypeError, math.log1p)
n = 2**90
self.assertAlmostEqual(math.log1p(n), math.log1p(float(n)))
def testLog10(self):
self.assertRaises(TypeError, math.log10)
self.ftest('log10(0.1)', math.log10(0.1), -1)
self.ftest('log10(1)', math.log10(1), 0)
self.ftest('log10(10)', math.log10(10), 1)
self.ftest('log10(10**1000)', math.log10(10**1000), 1000.0)
self.assertRaises(ValueError, math.log10, -1.5)
self.assertRaises(ValueError, math.log10, -10**1000)
self.assertRaises(ValueError, math.log10, NINF)
self.assertEqual(math.log(INF), INF)
self.assertTrue(math.isnan(math.log10(NAN)))
def testModf(self):
self.assertRaises(TypeError, math.modf)
def testmodf(name, result, expected):
(v1, v2), (e1, e2) = result, expected
if abs(v1-e1) > eps or abs(v2-e2) > eps:
self.fail('%s returned %r, expected %r'%\
(name, result, expected))
testmodf('modf(1.5)', math.modf(1.5), (0.5, 1.0))
testmodf('modf(-1.5)', math.modf(-1.5), (-0.5, -1.0))
self.assertEqual(math.modf(INF), (0.0, INF))
self.assertEqual(math.modf(NINF), (-0.0, NINF))
modf_nan = math.modf(NAN)
self.assertTrue(math.isnan(modf_nan[0]))
self.assertTrue(math.isnan(modf_nan[1]))
def testPow(self):
self.assertRaises(TypeError, math.pow)
self.ftest('pow(0,1)', math.pow(0,1), 0)
self.ftest('pow(1,0)', math.pow(1,0), 1)
self.ftest('pow(2,1)', math.pow(2,1), 2)
self.ftest('pow(2,-1)', math.pow(2,-1), 0.5)
self.assertEqual(math.pow(INF, 1), INF)
self.assertEqual(math.pow(NINF, 1), NINF)
self.assertEqual((math.pow(1, INF)), 1.)
self.assertEqual((math.pow(1, NINF)), 1.)
self.assertTrue(math.isnan(math.pow(NAN, 1)))
self.assertTrue(math.isnan(math.pow(2, NAN)))
self.assertTrue(math.isnan(math.pow(0, NAN)))
self.assertEqual(math.pow(1, NAN), 1)
# pow(0., x)
self.assertEqual(math.pow(0., INF), 0.)
self.assertEqual(math.pow(0., 3.), 0.)
self.assertEqual(math.pow(0., 2.3), 0.)
self.assertEqual(math.pow(0., 2.), 0.)
self.assertEqual(math.pow(0., 0.), 1.)
self.assertEqual(math.pow(0., -0.), 1.)
self.assertRaises(ValueError, math.pow, 0., -2.)
self.assertRaises(ValueError, math.pow, 0., -2.3)
self.assertRaises(ValueError, math.pow, 0., -3.)
self.assertRaises(ValueError, math.pow, 0., NINF)
self.assertTrue(math.isnan(math.pow(0., NAN)))
# pow(INF, x)
self.assertEqual(math.pow(INF, INF), INF)
self.assertEqual(math.pow(INF, 3.), INF)
self.assertEqual(math.pow(INF, 2.3), INF)
self.assertEqual(math.pow(INF, 2.), INF)
self.assertEqual(math.pow(INF, 0.), 1.)
self.assertEqual(math.pow(INF, -0.), 1.)
self.assertEqual(math.pow(INF, -2.), 0.)
self.assertEqual(math.pow(INF, -2.3), 0.)
self.assertEqual(math.pow(INF, -3.), 0.)
self.assertEqual(math.pow(INF, NINF), 0.)
self.assertTrue(math.isnan(math.pow(INF, NAN)))
# pow(-0., x)
self.assertEqual(math.pow(-0., INF), 0.)
self.assertEqual(math.pow(-0., 3.), -0.)
self.assertEqual(math.pow(-0., 2.3), 0.)
self.assertEqual(math.pow(-0., 2.), 0.)
self.assertEqual(math.pow(-0., 0.), 1.)
self.assertEqual(math.pow(-0., -0.), 1.)
self.assertRaises(ValueError, math.pow, -0., -2.)
self.assertRaises(ValueError, math.pow, -0., -2.3)
self.assertRaises(ValueError, math.pow, -0., -3.)
self.assertRaises(ValueError, math.pow, -0., NINF)
self.assertTrue(math.isnan(math.pow(-0., NAN)))
# pow(NINF, x)
self.assertEqual(math.pow(NINF, INF), INF)
self.assertEqual(math.pow(NINF, 3.), NINF)
self.assertEqual(math.pow(NINF, 2.3), INF)
self.assertEqual(math.pow(NINF, 2.), INF)
self.assertEqual(math.pow(NINF, 0.), 1.)
self.assertEqual(math.pow(NINF, -0.), 1.)
self.assertEqual(math.pow(NINF, -2.), 0.)
self.assertEqual(math.pow(NINF, -2.3), 0.)
self.assertEqual(math.pow(NINF, -3.), -0.)
self.assertEqual(math.pow(NINF, NINF), 0.)
self.assertTrue(math.isnan(math.pow(NINF, NAN)))
# pow(-1, x)
self.assertEqual(math.pow(-1., INF), 1.)
self.assertEqual(math.pow(-1., 3.), -1.)
self.assertRaises(ValueError, math.pow, -1., 2.3)
self.assertEqual(math.pow(-1., 2.), 1.)
self.assertEqual(math.pow(-1., 0.), 1.)
self.assertEqual(math.pow(-1., -0.), 1.)
self.assertEqual(math.pow(-1., -2.), 1.)
self.assertRaises(ValueError, math.pow, -1., -2.3)
self.assertEqual(math.pow(-1., -3.), -1.)
self.assertEqual(math.pow(-1., NINF), 1.)
self.assertTrue(math.isnan(math.pow(-1., NAN)))
# pow(1, x)
self.assertEqual(math.pow(1., INF), 1.)
self.assertEqual(math.pow(1., 3.), 1.)
self.assertEqual(math.pow(1., 2.3), 1.)
self.assertEqual(math.pow(1., 2.), 1.)
self.assertEqual(math.pow(1., 0.), 1.)
self.assertEqual(math.pow(1., -0.), 1.)
self.assertEqual(math.pow(1., -2.), 1.)
self.assertEqual(math.pow(1., -2.3), 1.)
self.assertEqual(math.pow(1., -3.), 1.)
self.assertEqual(math.pow(1., NINF), 1.)
self.assertEqual(math.pow(1., NAN), 1.)
# pow(x, 0) should be 1 for any x
self.assertEqual(math.pow(2.3, 0.), 1.)
self.assertEqual(math.pow(-2.3, 0.), 1.)
self.assertEqual(math.pow(NAN, 0.), 1.)
self.assertEqual(math.pow(2.3, -0.), 1.)
self.assertEqual(math.pow(-2.3, -0.), 1.)
self.assertEqual(math.pow(NAN, -0.), 1.)
# pow(x, y) is invalid if x is negative and y is not integral
self.assertRaises(ValueError, math.pow, -1., 2.3)
self.assertRaises(ValueError, math.pow, -15., -3.1)
# pow(x, NINF)
self.assertEqual(math.pow(1.9, NINF), 0.)
self.assertEqual(math.pow(1.1, NINF), 0.)
self.assertEqual(math.pow(0.9, NINF), INF)
self.assertEqual(math.pow(0.1, NINF), INF)
self.assertEqual(math.pow(-0.1, NINF), INF)
self.assertEqual(math.pow(-0.9, NINF), INF)
self.assertEqual(math.pow(-1.1, NINF), 0.)
self.assertEqual(math.pow(-1.9, NINF), 0.)
# pow(x, INF)
self.assertEqual(math.pow(1.9, INF), INF)
self.assertEqual(math.pow(1.1, INF), INF)
self.assertEqual(math.pow(0.9, INF), 0.)
self.assertEqual(math.pow(0.1, INF), 0.)
self.assertEqual(math.pow(-0.1, INF), 0.)
self.assertEqual(math.pow(-0.9, INF), 0.)
self.assertEqual(math.pow(-1.1, INF), INF)
self.assertEqual(math.pow(-1.9, INF), INF)
# pow(x, y) should work for x negative, y an integer
self.ftest('(-2.)**3.', math.pow(-2.0, 3.0), -8.0)
self.ftest('(-2.)**2.', math.pow(-2.0, 2.0), 4.0)
self.ftest('(-2.)**1.', math.pow(-2.0, 1.0), -2.0)
self.ftest('(-2.)**0.', math.pow(-2.0, 0.0), 1.0)
self.ftest('(-2.)**-0.', math.pow(-2.0, -0.0), 1.0)
self.ftest('(-2.)**-1.', math.pow(-2.0, -1.0), -0.5)
self.ftest('(-2.)**-2.', math.pow(-2.0, -2.0), 0.25)
self.ftest('(-2.)**-3.', math.pow(-2.0, -3.0), -0.125)
self.assertRaises(ValueError, math.pow, -2.0, -0.5)
self.assertRaises(ValueError, math.pow, -2.0, 0.5)
# the following tests have been commented out since they don't
# really belong here: the implementation of ** for floats is
# independent of the implementation of math.pow
#self.assertEqual(1**NAN, 1)
#self.assertEqual(1**INF, 1)
#self.assertEqual(1**NINF, 1)
#self.assertEqual(1**0, 1)
#self.assertEqual(1.**NAN, 1)
#self.assertEqual(1.**INF, 1)
#self.assertEqual(1.**NINF, 1)
#self.assertEqual(1.**0, 1)
def testRadians(self):
self.assertRaises(TypeError, math.radians)
self.ftest('radians(180)', math.radians(180), math.pi)
self.ftest('radians(90)', math.radians(90), math.pi/2)
self.ftest('radians(-45)', math.radians(-45), -math.pi/4)
def testSin(self):
self.assertRaises(TypeError, math.sin)
self.ftest('sin(0)', math.sin(0), 0)
self.ftest('sin(pi/2)', math.sin(math.pi/2), 1)
self.ftest('sin(-pi/2)', math.sin(-math.pi/2), -1)
try:
self.assertTrue(math.isnan(math.sin(INF)))
self.assertTrue(math.isnan(math.sin(NINF)))
except ValueError:
self.assertRaises(ValueError, math.sin, INF)
self.assertRaises(ValueError, math.sin, NINF)
self.assertTrue(math.isnan(math.sin(NAN)))
def testSinh(self):
self.assertRaises(TypeError, math.sinh)
self.ftest('sinh(0)', math.sinh(0), 0)
self.ftest('sinh(1)**2-cosh(1)**2', math.sinh(1)**2-math.cosh(1)**2, -1)
self.ftest('sinh(1)+sinh(-1)', math.sinh(1)+math.sinh(-1), 0)
self.assertEqual(math.sinh(INF), INF)
self.assertEqual(math.sinh(NINF), NINF)
self.assertTrue(math.isnan(math.sinh(NAN)))
def testSqrt(self):
self.assertRaises(TypeError, math.sqrt)
self.ftest('sqrt(0)', math.sqrt(0), 0)
self.ftest('sqrt(1)', math.sqrt(1), 1)
self.ftest('sqrt(4)', math.sqrt(4), 2)
self.assertEqual(math.sqrt(INF), INF)
self.assertRaises(ValueError, math.sqrt, NINF)
self.assertTrue(math.isnan(math.sqrt(NAN)))
def testTan(self):
self.assertRaises(TypeError, math.tan)
self.ftest('tan(0)', math.tan(0), 0)
self.ftest('tan(pi/4)', math.tan(math.pi/4), 1)
self.ftest('tan(-pi/4)', math.tan(-math.pi/4), -1)
try:
self.assertTrue(math.isnan(math.tan(INF)))
self.assertTrue(math.isnan(math.tan(NINF)))
except ValueError:
self.assertRaises(ValueError, math.tan, INF)
self.assertRaises(ValueError, math.tan, NINF)
self.assertTrue(math.isnan(math.tan(NAN)))
def testTanh(self):
self.assertRaises(TypeError, math.tanh)
self.ftest('tanh(0)', math.tanh(0), 0)
self.ftest('tanh(1)+tanh(-1)', math.tanh(1)+math.tanh(-1), 0)
self.ftest('tanh(inf)', math.tanh(INF), 1)
self.ftest('tanh(-inf)', math.tanh(NINF), -1)
self.assertTrue(math.isnan(math.tanh(NAN)))
@requires_IEEE_754
@unittest.skipIf(sysconfig.get_config_var('TANH_PRESERVES_ZERO_SIGN') == 0,
"system tanh() function doesn't copy the sign")
def testTanhSign(self):
# check that tanh(-0.) == -0. on IEEE 754 systems
self.assertEqual(math.tanh(-0.), -0.)
self.assertEqual(math.copysign(1., math.tanh(-0.)),
math.copysign(1., -0.))
def test_trunc(self):
self.assertEqual(math.trunc(1), 1)
self.assertEqual(math.trunc(-1), -1)
self.assertEqual(type(math.trunc(1)), int)
self.assertEqual(type(math.trunc(1.5)), int)
self.assertEqual(math.trunc(1.5), 1)
self.assertEqual(math.trunc(-1.5), -1)
self.assertEqual(math.trunc(1.999999), 1)
self.assertEqual(math.trunc(-1.999999), -1)
self.assertEqual(math.trunc(-0.999999), -0)
self.assertEqual(math.trunc(-100.999), -100)
class TestTrunc(object):
def __trunc__(self):
return 23
class TestNoTrunc(object):
pass
self.assertEqual(math.trunc(TestTrunc()), 23)
self.assertRaises(TypeError, math.trunc)
self.assertRaises(TypeError, math.trunc, 1, 2)
self.assertRaises(TypeError, math.trunc, TestNoTrunc())
def testIsfinite(self):
self.assertTrue(math.isfinite(0.0))
self.assertTrue(math.isfinite(-0.0))
self.assertTrue(math.isfinite(1.0))
self.assertTrue(math.isfinite(-1.0))
self.assertFalse(math.isfinite(float("nan")))
self.assertFalse(math.isfinite(float("inf")))
self.assertFalse(math.isfinite(float("-inf")))
def testIsnan(self):
self.assertTrue(math.isnan(float("nan")))
self.assertTrue(math.isnan(float("inf")* 0.))
self.assertFalse(math.isnan(float("inf")))
self.assertFalse(math.isnan(0.))
self.assertFalse(math.isnan(1.))
def testIsinf(self):
self.assertTrue(math.isinf(float("inf")))
self.assertTrue(math.isinf(float("-inf")))
self.assertTrue(math.isinf(1E400))
self.assertTrue(math.isinf(-1E400))
self.assertFalse(math.isinf(float("nan")))
self.assertFalse(math.isinf(0.))
self.assertFalse(math.isinf(1.))
# RED_FLAG 16-Oct-2000 Tim
# While 2.0 is more consistent about exceptions than previous releases, it
# still fails this part of the test on some platforms. For now, we only
# *run* test_exceptions() in verbose mode, so that this isn't normally
# tested.
if verbose:
def test_exceptions(self):
try:
x = math.exp(-1000000000)
except:
# mathmodule.c is failing to weed out underflows from libm, or
# we've got an fp format with huge dynamic range
self.fail("underflowing exp() should not have raised "
"an exception")
if x != 0:
self.fail("underflowing exp() should have returned 0")
# If this fails, probably using a strict IEEE-754 conforming libm, and x
# is +Inf afterwards. But Python wants overflows detected by default.
try:
x = math.exp(1000000000)
except OverflowError:
pass
else:
self.fail("overflowing exp() didn't trigger OverflowError")
# If this fails, it could be a puzzle. One odd possibility is that
# mathmodule.c's macros are getting confused while comparing
# Inf (HUGE_VAL) to a NaN, and artificially setting errno to ERANGE
# as a result (and so raising OverflowError instead).
try:
x = math.sqrt(-1.0)
except ValueError:
pass
else:
self.fail("sqrt(-1) didn't raise ValueError")
@requires_IEEE_754
def test_testfile(self):
for id, fn, ar, ai, er, ei, flags in parse_testfile(test_file):
# Skip if either the input or result is complex, or if
# flags is nonempty
if ai != 0. or ei != 0. or flags:
continue
if fn in ['rect', 'polar']:
# no real versions of rect, polar
continue
func = getattr(math, fn)
try:
result = func(ar)
except ValueError as exc:
message = (("Unexpected ValueError: %s\n " +
"in test %s:%s(%r)\n") % (exc.args[0], id, fn, ar))
self.fail(message)
except OverflowError:
message = ("Unexpected OverflowError in " +
"test %s:%s(%r)\n" % (id, fn, ar))
self.fail(message)
self.ftest("%s:%s(%r)" % (id, fn, ar), result, er)
@requires_IEEE_754
def test_mtestfile(self):
ALLOWED_ERROR = 20 # permitted error, in ulps
fail_fmt = "{}:{}({!r}): expected {!r}, got {!r}"
failures = []
for id, fn, arg, expected, flags in parse_mtestfile(math_testcases):
func = getattr(math, fn)
if 'invalid' in flags or 'divide-by-zero' in flags:
expected = 'ValueError'
elif 'overflow' in flags:
expected = 'OverflowError'
try:
got = func(arg)
except ValueError:
got = 'ValueError'
except OverflowError:
got = 'OverflowError'
accuracy_failure = None
if isinstance(got, float) and isinstance(expected, float):
if math.isnan(expected) and math.isnan(got):
continue
if not math.isnan(expected) and not math.isnan(got):
if fn == 'lgamma':
# we use a weaker accuracy test for lgamma;
# lgamma only achieves an absolute error of
# a few multiples of the machine accuracy, in
# general.
accuracy_failure = acc_check(expected, got,
rel_err = 5e-15,
abs_err = 5e-15)
elif fn == 'erfc':
# erfc has less-than-ideal accuracy for large
# arguments (x ~ 25 or so), mainly due to the
# error involved in computing exp(-x*x).
#
# XXX Would be better to weaken this test only
# for large x, instead of for all x.
accuracy_failure = ulps_check(expected, got, 2000)
else:
accuracy_failure = ulps_check(expected, got, 20)
if accuracy_failure is None:
continue
if isinstance(got, str) and isinstance(expected, str):
if got == expected:
continue
fail_msg = fail_fmt.format(id, fn, arg, expected, got)
if accuracy_failure is not None:
fail_msg += ' ({})'.format(accuracy_failure)
failures.append(fail_msg)
if failures:
self.fail('Failures in test_mtestfile:\n ' +
'\n '.join(failures))
def test_main():
from doctest import DocFileSuite
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(MathTests))
suite.addTest(DocFileSuite("ieee754.txt"))
run_unittest(suite)
if __name__ == '__main__':
test_main()
|
apache-2.0
|
carvalhomb/tsmells
|
fetch/scripts/postprocessing/aggregateVersionMetricData.py
|
1
|
8766
|
# !/usr/bin/python
# This file is part of snavtofamix (Source Navigator to FAMIX).
#
# snavtofamix is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# snavtofamix is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along
# with snavtofamix; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# Copyright 2006,2007 Bart Van Rompaey <[email protected]>,
# Bart Du Bois <[email protected]>
#
# This module writes metrics data on several versions (metrics as columns) to
# files for each metric (versions as columns).
# The resulting files can be immediately read by R as tables,
# for which boxplots across multiple versions can be drawn easily
# for each metric:
#
# data <- read.table(outputFileName,header=TRUE)
# boxplot(data,horizontal=TRUE)
# summary(data)
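#
# For illustration only (hypothetical class and version names), each output
# file written by this script has versions as columns and one row per class:
#
#   uniqueClassName rel_1 rel_2 rel_3
#   'Order@order.cpp' 12 13 NA
#   'Invoice@invoice.cpp' 7 7 8
#
# NA marks versions in which a class has no measurement.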
#######################################################################
# imports
#######################################################################
# python std + external libs
import sys
import os
#######################################################################
# global variables
#######################################################################
# first columns: ID (0), name (1), filename (2) => 3
classIdIndex = 0
classNameIndex = classIdIndex+1
fileNameIndex = classNameIndex + 1
firstMetricColumnIndex = fileNameIndex + 1
#######################################################################
# helper functions
#######################################################################
# get list of file info objects for files of particular extensions
def listDirectory(directory, fileExtList):
fileList = [os.path.normcase(f) for f in os.listdir(directory)]
fileList = [os.path.join(directory, f) for f in fileList \
if os.path.splitext(f)[1] in fileExtList]
return fileList
#######################################################################
# main
#######################################################################
# check command line arguments
if len(sys.argv) < 3:
print "Usage: python",sys.argv[0],"inputDir","outputDir"
sys.exit(1)
else:
inputDir = sys.argv[1].rstrip("/")
if not os.path.isdir(inputDir):
print "Input directory \"" + inputDir + "\" does not exist."
sys.exit(1)
outputDir = sys.argv[2].rstrip("/")
if not os.path.isdir(outputDir):
print "Output directory \"" + outputDir + "\" does not exist."
sys.exit(1)
# collect list of files and versions
fileList=listDirectory(inputDir, ['.txt'])
versionNames = [os.path.splitext(os.path.split(fileName)[1])[0] for fileName in fileList]
print "Versions:",versionNames
# map files to versions
versions = {} # fileName x versionName
fileIndex = 0
while fileIndex < len(fileList):
versions[fileList[fileIndex]] = versionNames[fileIndex]
fileIndex +=1
allData = {} # metricName x version [ x className@fileName x metricValue ]
columnNames = []
metricNames = []
allUniqueClassNames = []
print "Retrieving raw data...",
# retrieve and store all data
maxNrOfDataInVersions=0
for fileName in fileList:
file = open(fileName,'r')
versionName = versions[fileName]
isColumnHeaderLine=True
nrOfDataInCurrentVersion=0
for line in file:
line = line.strip()
if isColumnHeaderLine:
#retrieve metric names from column header line
columnNames=line.split(" ")
metricNames=columnNames[firstMetricColumnIndex:len(columnNames)]
for metricName in metricNames:
if not(metricName in allData):
allData[metricName] = {}
allData[metricName][versionName] = {}
isColumnHeaderLine=False
continue
# line is not a column header
className = line.split("\t")[classNameIndex].strip("'")
fileName = line.split("\t")[fileNameIndex].strip("'")
uniqueClassName = "@".join([className, fileName])
if not uniqueClassName in allUniqueClassNames:
allUniqueClassNames.append(uniqueClassName)
metricValues = line.split("\t")[firstMetricColumnIndex:len(columnNames)]
metricIndex=0
for metricValue in metricValues:
allData[metricNames[metricIndex]][versionName][uniqueClassName] = int(metricValue)
metricIndex += 1
nrOfDataInCurrentVersion += 1
maxNrOfDataInVersions=max(maxNrOfDataInVersions,nrOfDataInCurrentVersion)
file.close()
print "[retrieved]"
print "Metrics:", metricNames
sortedClassNames = {} # metricName x [ uniqueClassName ]
print "Sorting data by metric value of initial version...",
# sort allData[metricName][version1] by the metricValue of each uniqueClassName
for metricName in metricNames:
sortedClassNames[metricName] = []
versionIndex = 0
versionName = versionNames[versionIndex]
metricValueDict = allData[metricName][versionName]
sortedMetricValueDict = {}
# create a map of [metricValue, uniqueClassName] items sorted descending
# by metricValue
aMap = map(lambda t: list(t),metricValueDict.items()) # keys map
map(lambda r: r.reverse(),aMap) # values map
aMap.sort()
aMap.reverse()
itemIndex = 0
while itemIndex < len(aMap):
curItem = aMap[itemIndex]
uniqueClassName = curItem[1]
metricValue = curItem[0]
sortedMetricValueDict[uniqueClassName] = metricValue
if not(uniqueClassName in sortedClassNames[metricName]):
sortedClassNames[metricName].append(uniqueClassName)
itemIndex += 1
# replace the old dict with the sorted version
allData[metricName][versionName] = sortedMetricValueDict
# for each i in 2:length(versions)
# sort allData[metricName][version_i] by the order of allData[metricName][version_i-1],
# and further on by the metricValue of each left-over uniqueClassName
for metricName in metricNames:
versionIndex = 1
while versionIndex < len(versionNames):
versionName = versionNames[versionIndex]
metricValueDict = allData[metricName][versionName]
sortedMetricValueDict = {}
# create a map of [metricValue, uniqueClassName] items sorted descending
# by metricValue
aMap = map(lambda t: list(t),metricValueDict.items()) # keys map
map(lambda r: r.reverse(),aMap) # values map
aMap.sort()
aMap.reverse()
itemIndex = 0
while itemIndex < len(aMap):
curItem = aMap[itemIndex]
uniqueClassName = curItem[1]
metricValue = curItem[0]
sortedMetricValueDict[uniqueClassName] = metricValue
if not(uniqueClassName in sortedClassNames[metricName]):
sortedClassNames[metricName].append(uniqueClassName)
itemIndex += 1
# metric value dict sorted relative to previous version
relativeSortedMetricValueDict = {}
handledClassNames = []
# iterate over class names of previous version
for uniqueClassName in allData[metricName][versionNames[versionIndex-1]]:
if uniqueClassName in sortedMetricValueDict:
relativeSortedMetricValueDict[uniqueClassName] = sortedMetricValueDict[uniqueClassName]
handledClassNames.append(uniqueClassName)
# iterate over class names of previous version
for uniqueClassName in sortedMetricValueDict:
if not(uniqueClassName in handledClassNames):
relativeSortedMetricValueDict[uniqueClassName] = sortedMetricValueDict[uniqueClassName]
# replace the old dict with the relatively sorted version
allData[metricName][versionName] = relativeSortedMetricValueDict
versionIndex += 1
print "[sorted]"
# write all data
metricIndex = 0
for metricName in metricNames:
outputFileName = os.path.join(outputDir,metricName) + ".txt"
print "Exporting",metricName,"data to file",outputFileName+"...",
outputFile = open(outputFileName,'w')
outputFile.write("uniqueClassName ")
for versionName in versionNames:
outputFile.write(versionName)
isLastVersionName = (versionName == versionNames[len(versionNames)-1])
if isLastVersionName:
outputFile.write("\n")
else:
outputFile.write(" ")
for uniqueClassName in sortedClassNames[metricName]:
outputFile.write("'" + uniqueClassName + "' ")
for versionName in versionNames:
curData = "NA"
versionHasData = uniqueClassName in allData[metricName][versionName]
if versionHasData:
curData = `allData[metricName][versionName][uniqueClassName]`
outputFile.write(curData)
isLastVersionName = (versionName == versionNames[len(versionNames)-1])
if isLastVersionName:
outputFile.write("\n")
else:
outputFile.write(" ")
outputFile.close()
print "[done]"
metricIndex += 1
|
gpl-2.0
|
chainer/chainer
|
chainer/functions/activation/softmax.py
|
8
|
3591
|
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer import function_node
import chainer.functions
from chainer.utils import type_check
if cuda.cudnn_enabled:
cudnn = cuda.cudnn
_algorithm = cuda.libcudnn.CUDNN_SOFTMAX_ACCURATE
class Softmax(function_node.FunctionNode):
"""Softmax activation function."""
def __init__(self, axis=1):
self.axis = axis
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x',))
x_type, = in_types
type_check.expect(
x_type.dtype.kind == 'f',
-x_type.ndim <= self.axis < x_type.ndim,
)
def forward(self, x):
xp = backend.get_array_module(*x)
if xp is cuda.cupy and chainer.should_use_cudnn('>=auto'):
y = cudnn.softmax_forward(x[0], self.axis, _algorithm)
else:
y = x[0] - x[0].max(axis=self.axis, keepdims=True)
xp.exp(y, out=y)
y /= y.sum(axis=self.axis, keepdims=True)
self.retain_outputs((0,))
return y,
def backward(self, indexes, grad_outputs):
y = self.get_retained_outputs()[0]
gy, = grad_outputs
return _SoftmaxGrad(self.axis).apply((y, gy))
class _SoftmaxGrad(function_node.FunctionNode):
def __init__(self, axis):
self.axis = axis
def forward(self, inputs):
self.retain_inputs((0, 1))
y, gy = inputs
xp = backend.get_array_module(*y)
if xp is cuda.cupy and chainer.should_use_cudnn('>=auto'):
gx = cudnn.softmax_backward(y, gy, self.axis, _algorithm)
else:
gx = y * gy
sumdx = gx.sum(axis=self.axis, keepdims=True)
gx -= y * sumdx
return gx,
def backward(self, indexes, grad_outputs):
y, gy = self.get_retained_inputs()
ggx, = grad_outputs
gs = chainer.functions.sum(ggx * y, axis=self.axis, keepdims=True)
ga = ggx - chainer.functions.broadcast_to(gs, gy.shape)
ret = []
if 0 in indexes:
s = chainer.functions.broadcast_to(chainer.functions.sum(
y * gy, axis=self.axis, keepdims=True), gy.shape)
gy2 = ga * gy - ggx * s
ret.append(gy2)
if 1 in indexes:
ggy = ga * y
ret.append(ggy)
return tuple(ret)
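# A minimal NumPy sketch (not part of Chainer) of the identity the CPU branch
# of _SoftmaxGrad.forward computes: for y = softmax(x) and upstream gradient
# gy, gx = y * gy - y * sum(y * gy, axis), e.g.
#
# >>> import numpy as np
# >>> y = np.array([[0.09, 0.24, 0.67]])
# >>> gy = np.array([[1.0, 0.0, 0.0]])
# >>> gx = y * gy - y * (y * gy).sum(axis=1, keepdims=True)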
def softmax(x, axis=1):
"""Softmax function.
    This function computes the softmax of the input along an axis. Let
    :math:`c = (c_1, c_2, \\dots, c_D)` be a slice of ``x`` along
    the axis. For each slice :math:`c`, it computes the function :math:`f(c)`
    defined as :math:`f(c)={\\exp(c) \\over \\sum_{d} \\exp(c_d)}`.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`):
Input variable.
            An :math:`n`-dimensional (:math:`n \\geq 2`) float array.
axis (int): The axis along which the softmax is to be computed.
Returns:
~chainer.Variable: Output variable.
        An :math:`n`-dimensional (:math:`n \\geq 2`) float array, which has
        the same shape as ``x``.
.. admonition:: Example
>>> x = np.array([[0, 1, 2], [0, 2, 4]], np.float32)
>>> x
array([[0., 1., 2.],
[0., 2., 4.]], dtype=float32)
>>> y = F.softmax(x, axis=1)
>>> y.array
array([[0.09003057, 0.24472848, 0.66524094],
[0.01587624, 0.11731043, 0.86681336]], dtype=float32)
>>> F.sum(y, axis=1).array
array([1., 1.], dtype=float32)
"""
return Softmax(axis=axis).apply((x,))[0]
|
mit
|
clemux/debsources
|
debsources/plugins/hook_hello.py
|
6
|
1244
|
# Copyright (C) 2013-2014 The Debsources developers <[email protected]>.
# See the AUTHORS file at the top-level directory of this distribution and at
# https://anonscm.debian.org/gitweb/?p=qa/debsources.git;a=blob;f=AUTHORS;hb=HEAD
#
# This file is part of Debsources. Debsources is free software: you can
# redistribute it and/or modify it under the terms of the GNU Affero General
# Public License as published by the Free Software Foundation, either version 3
# of the License, or (at your option) any later version. For more information
# see the COPYING file at the top-level directory of this distribution and at
# https://anonscm.debian.org/gitweb/?p=qa/debsources.git;a=blob;f=COPYING;hb=HEAD
from __future__ import absolute_import
import logging
conf = None
def add_package(session, pkg, pkgdir, file_table):
global conf
logging.debug('add-package %s %s' % (pkg, pkgdir))
def rm_package(session, pkg, pkgdir, file_table):
global conf
logging.debug('rm-package %s %s' % (pkg, pkgdir))
def init_plugin(debsources):
global conf
conf = debsources['config']
debsources['subscribe']('add-package', add_package, title='hello')
debsources['subscribe']('rm-package', rm_package, title='hello')
|
agpl-3.0
|
biddisco/VTK
|
Filters/General/Testing/Python/spatialRepAll.py
|
21
|
3220
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
=========================================================================
Program: Visualization Toolkit
Module: TestNamedColorsIntegration.py
Copyright (c) Ken Martin, Will Schroeder, Bill Lorensen
All rights reserved.
See Copyright.txt or http://www.kitware.com/Copyright.htm for details.
This software is distributed WITHOUT ANY WARRANTY; without even
the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the above copyright notice for more information.
=========================================================================
'''
import vtk
import vtk.test.Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
class spatialRepAll(vtk.test.Testing.vtkTest):
def testspatialRepAll(self):
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.SetMultiSamples(0)
renWin.AddRenderer(ren)
asource = vtk.vtkSTLReader()
asource.SetFileName(VTK_DATA_ROOT + "/Data/42400-IDGH.stl")
dataMapper = vtk.vtkPolyDataMapper()
dataMapper.SetInputConnection(asource.GetOutputPort())
model = vtk.vtkActor()
model.SetMapper(dataMapper)
model.GetProperty().SetColor(1, 0, 0)
model.VisibilityOn()
locators = ["vtkPointLocator", "vtkCellLocator", "vtkOBBTree"]
locator = list()
boxes = list()
boxMapper = list()
boxActor = list()
for idx, vtkLocatorType in enumerate(locators):
            locator.append(getattr(vtk, vtkLocatorType)())
            locator[idx].AutomaticOff()
            locator[idx].SetMaxLevel(3)
            boxes.append(vtk.vtkSpatialRepresentationFilter())
boxes[idx].SetInputConnection(asource.GetOutputPort())
boxes[idx].SetSpatialRepresentation(locator[idx])
boxes[idx].SetGenerateLeaves(1)
boxes[idx].Update()
output = boxes[idx].GetOutput().GetBlock(boxes[idx].GetMaximumLevel() + 1)
boxMapper.append(vtk.vtkPolyDataMapper())
boxMapper[idx].SetInputData(output)
boxActor.append(vtk.vtkActor())
boxActor[idx].SetMapper(boxMapper[idx])
boxActor[idx].AddPosition((idx + 1) * 15, 0, 0)
ren.AddActor(boxActor[idx])
ren.AddActor(model)
ren.SetBackground(0.1, 0.2, 0.4)
renWin.SetSize(400, 160)
# render the image
camera = vtk.vtkCamera()
camera.SetPosition(148.579, 136.352, 214.961)
camera.SetFocalPoint(151.889, 86.3178, 223.333)
camera.SetViewAngle(30)
camera.SetViewUp(0, 0, -1)
camera.SetClippingRange(1, 100)
ren.SetActiveCamera(camera)
# render and interact with data
iRen = vtk.vtkRenderWindowInteractor()
        iRen.SetRenderWindow(renWin)
renWin.Render()
img_file = "spatialRepAll.png"
vtk.test.Testing.compareImage(iRen.GetRenderWindow(), vtk.test.Testing.getAbsImagePath(img_file), threshold=25)
vtk.test.Testing.interact()
if __name__ == "__main__":
vtk.test.Testing.main([(spatialRepAll, 'test')])
|
bsd-3-clause
|
openstates/openstates.org
|
graphapi/middleware.py
|
1
|
1898
|
import logging
from graphql.language.ast import FragmentSpread, Variable
class QueryCostException(Exception):
pass
log = logging.getLogger("graphapi")
def _get_counts(info, fragments, variable_values):
multiplier = 1
inner_multiplier = 0
if isinstance(info, FragmentSpread):
for selection in fragments[info.name.value].selection_set.selections:
inner_multiplier += _get_counts(selection, fragments, variable_values)
else:
# the multiplier is either 1 or the number of elements returned
for argument in info.arguments:
if argument.name.value in ("first", "last"):
if isinstance(argument.value, Variable):
multiplier = variable_values[argument.value.name.value]
else:
multiplier = int(argument.value.value)
# count up how many multi-nodes inside
if info.selection_set:
for selection in info.selection_set.selections:
inner_multiplier += _get_counts(selection, fragments, variable_values)
# if this wasn't a multi-node, this counts as one node
if inner_multiplier == 0:
inner_multiplier = 1
return multiplier * inner_multiplier
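# Worked example of the cost model above (hypothetical query): a top-level
# connection requesting `first: 10` whose selection is a nested connection
# requesting `first: 5` over a single scalar field costs 10 * 5 = 50 nodes;
# a plain scalar field with no `first`/`last` argument counts as 1.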
class QueryProtectionMiddleware(object):
def __init__(self, max_cost=5000):
self.max_cost = max_cost
def resolve(self, next, root, info, **args):
if root is None:
count = _get_counts(
info.field_asts[0], info.fragments, info.variable_values
)
log.debug(
f"graphql query name={info.field_name} asts={info.field_asts} cost={count}"
)
if count > self.max_cost:
raise QueryCostException(
f"Query Cost is too high ({count}), limit is {self.max_cost}"
)
return next(root, info, **args)
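# Hedged usage sketch (not part of this module): with graphene the middleware
# is typically supplied at execution time, e.g.
#
# schema.execute(query, middleware=[QueryProtectionMiddleware(max_cost=5000)])
#
# or listed in the GRAPHENE["MIDDLEWARE"] setting of a graphene-django project.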
|
mit
|
dbckz/ansible
|
lib/ansible/modules/cloud/amazon/ec2_vpc_nat_gateway.py
|
33
|
34892
|
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ec2_vpc_nat_gateway
short_description: Manage AWS VPC NAT Gateways.
description:
- Ensure the state of AWS VPC NAT Gateways based on their id, allocation and subnet ids.
version_added: "2.2"
requirements: [boto3, botocore]
options:
state:
description:
- Ensure NAT Gateway is present or absent.
required: false
default: "present"
choices: ["present", "absent"]
nat_gateway_id:
description:
- The id AWS dynamically allocates to the NAT Gateway on creation.
        This is required when state=absent.
required: false
default: None
subnet_id:
description:
- The id of the subnet to create the NAT Gateway in. This is required
with the present option.
required: false
default: None
allocation_id:
description:
      - The id of the elastic IP allocation. If neither this nor the
        eip_address is passed, an EIP is generated for this NAT Gateway.
required: false
default: None
eip_address:
description:
- The elastic IP address of the EIP you want attached to this NAT Gateway.
If this is not passed and the allocation_id is not passed,
an EIP is generated for this NAT Gateway.
required: false
if_exist_do_not_create:
description:
- if a NAT Gateway exists already in the subnet_id, then do not create a new one.
required: false
default: false
release_eip:
description:
- Deallocate the EIP from the VPC.
- Option is only valid with the absent state.
      - You should use this with the wait option, since you cannot release an address while a delete operation is in progress.
required: false
default: true
wait:
description:
- Wait for operation to complete before returning.
required: false
default: false
wait_timeout:
description:
- How many seconds to wait for an operation to complete before timing out.
required: false
default: 300
client_token:
description:
- Optional unique token to be used during create to ensure idempotency.
When specifying this option, ensure you specify the eip_address parameter
        as well; otherwise any subsequent runs will fail.
required: false
author:
- "Allen Sanabria (@linuxdynasty)"
- "Jon Hadfield (@jonhadfield)"
- "Karen Cheng(@Etherdaemon)"
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
- name: Create new nat gateway with client token.
ec2_vpc_nat_gateway:
state: present
subnet_id: subnet-12345678
eip_address: 52.1.1.1
region: ap-southeast-2
client_token: abcd-12345678
register: new_nat_gateway
- name: Create new nat gateway using an allocation-id.
ec2_vpc_nat_gateway:
state: present
subnet_id: subnet-12345678
allocation_id: eipalloc-12345678
region: ap-southeast-2
register: new_nat_gateway
- name: Create new nat gateway, using an EIP address and wait for available status.
ec2_vpc_nat_gateway:
state: present
subnet_id: subnet-12345678
eip_address: 52.1.1.1
wait: yes
region: ap-southeast-2
register: new_nat_gateway
- name: Create new nat gateway and allocate new EIP.
ec2_vpc_nat_gateway:
state: present
subnet_id: subnet-12345678
wait: yes
region: ap-southeast-2
register: new_nat_gateway
- name: Create new nat gateway and allocate new EIP if a nat gateway does not yet exist in the subnet.
ec2_vpc_nat_gateway:
state: present
subnet_id: subnet-12345678
wait: yes
region: ap-southeast-2
if_exist_do_not_create: true
register: new_nat_gateway
- name: Delete nat gateway using discovered nat gateways from facts module.
ec2_vpc_nat_gateway:
state: absent
region: ap-southeast-2
wait: yes
nat_gateway_id: "{{ item.NatGatewayId }}"
release_eip: yes
register: delete_nat_gateway_result
with_items: "{{ gateways_to_remove.result }}"
- name: Delete nat gateway and wait for deleted status.
ec2_vpc_nat_gateway:
state: absent
nat_gateway_id: nat-12345678
wait: yes
wait_timeout: 500
region: ap-southeast-2
- name: Delete nat gateway and release EIP.
ec2_vpc_nat_gateway:
state: absent
nat_gateway_id: nat-12345678
release_eip: yes
wait: yes
wait_timeout: 300
region: ap-southeast-2
'''
RETURN = '''
create_time:
    description: The creation time in ISO 8601 date-time format (UTC).
returned: In all cases.
type: string
sample: "2016-03-05T05:19:20.282000+00:00'"
nat_gateway_id:
description: id of the VPC NAT Gateway
returned: In all cases.
type: string
sample: "nat-0d1e3a878585988f8"
subnet_id:
description: id of the Subnet
returned: In all cases.
type: string
sample: "subnet-12345"
state:
description: The current state of the NAT Gateway.
returned: In all cases.
type: string
sample: "available"
vpc_id:
description: id of the VPC.
returned: In all cases.
type: string
sample: "vpc-12345"
nat_gateway_addresses:
    description: List of dictionaries containing the public_ip, network_interface_id, private_ip, and allocation_id.
returned: In all cases.
type: string
sample: [
{
'public_ip': '52.52.52.52',
'network_interface_id': 'eni-12345',
'private_ip': '10.0.0.100',
'allocation_id': 'eipalloc-12345'
}
]
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import ec2_argument_spec, get_aws_connection_info, boto3_conn
import datetime
import random
import re
import time
from dateutil.tz import tzutc
try:
import botocore
import boto3
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
DRY_RUN_GATEWAYS = [
{
"nat_gateway_id": "nat-123456789",
"subnet_id": "subnet-123456789",
"nat_gateway_addresses": [
{
"public_ip": "55.55.55.55",
"network_interface_id": "eni-1234567",
"private_ip": "10.0.0.102",
"allocation_id": "eipalloc-1234567"
}
],
"state": "available",
"create_time": "2016-03-05T05:19:20.282000+00:00",
"vpc_id": "vpc-12345678"
}
]
DRY_RUN_GATEWAY_UNCONVERTED = [
{
'VpcId': 'vpc-12345678',
'State': 'available',
'NatGatewayId': 'nat-123456789',
'SubnetId': 'subnet-123456789',
'NatGatewayAddresses': [
{
'PublicIp': '55.55.55.55',
'NetworkInterfaceId': 'eni-1234567',
'AllocationId': 'eipalloc-1234567',
'PrivateIp': '10.0.0.102'
}
],
'CreateTime': datetime.datetime(2016, 3, 5, 5, 19, 20, 282000, tzinfo=tzutc())
}
]
DRY_RUN_ALLOCATION_UNCONVERTED = {
'Addresses': [
{
'PublicIp': '55.55.55.55',
'Domain': 'vpc',
'AllocationId': 'eipalloc-1234567'
}
]
}
DRY_RUN_MSGS = 'DryRun Mode:'
def convert_to_lower(data):
"""Convert all uppercase keys in dict with lowercase_
Args:
data (dict): Dictionary with keys that have upper cases in them
Example.. FooBar == foo_bar
if a val is of type datetime.datetime, it will be converted to
the ISO 8601
Basic Usage:
>>> test = {'FooBar': []}
>>> test = convert_to_lower(test)
{
'foo_bar': []
}
Returns:
Dictionary
"""
results = dict()
if isinstance(data, dict):
for key, val in data.items():
key = re.sub(r'(([A-Z]{1,3}){1})', r'_\1', key).lower()
if key[0] == '_':
key = key[1:]
if isinstance(val, datetime.datetime):
results[key] = val.isoformat()
elif isinstance(val, dict):
results[key] = convert_to_lower(val)
elif isinstance(val, list):
converted = list()
for item in val:
converted.append(convert_to_lower(item))
results[key] = converted
else:
results[key] = val
return results
def get_nat_gateways(client, subnet_id=None, nat_gateway_id=None,
states=None, check_mode=False):
"""Retrieve a list of NAT Gateways
Args:
client (botocore.client.EC2): Boto3 client
Kwargs:
subnet_id (str): The subnet_id the nat resides in.
nat_gateway_id (str): The Amazon nat id.
states (list): States available (pending, failed, available, deleting, and deleted)
default=None
Basic Usage:
>>> client = boto3.client('ec2')
>>> subnet_id = 'subnet-12345678'
>>> get_nat_gateways(client, subnet_id)
[
true,
"",
{
"nat_gateway_id": "nat-123456789",
"subnet_id": "subnet-123456789",
"nat_gateway_addresses": [
{
"public_ip": "55.55.55.55",
"network_interface_id": "eni-1234567",
"private_ip": "10.0.0.102",
"allocation_id": "eipalloc-1234567"
}
],
"state": "deleted",
"create_time": "2016-03-05T00:33:21.209000+00:00",
"delete_time": "2016-03-05T00:36:37.329000+00:00",
"vpc_id": "vpc-12345678"
}
Returns:
Tuple (bool, str, list)
"""
params = dict()
err_msg = ""
gateways_retrieved = False
existing_gateways = list()
if not states:
states = ['available', 'pending']
if nat_gateway_id:
params['NatGatewayIds'] = [nat_gateway_id]
else:
params['Filter'] = [
{
'Name': 'subnet-id',
'Values': [subnet_id]
},
{
'Name': 'state',
'Values': states
}
]
try:
if not check_mode:
gateways = client.describe_nat_gateways(**params)['NatGateways']
if gateways:
for gw in gateways:
existing_gateways.append(convert_to_lower(gw))
gateways_retrieved = True
else:
gateways_retrieved = True
if nat_gateway_id:
if DRY_RUN_GATEWAYS[0]['nat_gateway_id'] == nat_gateway_id:
existing_gateways = DRY_RUN_GATEWAYS
elif subnet_id:
if DRY_RUN_GATEWAYS[0]['subnet_id'] == subnet_id:
existing_gateways = DRY_RUN_GATEWAYS
err_msg = '{0} Retrieving gateways'.format(DRY_RUN_MSGS)
except botocore.exceptions.ClientError as e:
err_msg = str(e)
return gateways_retrieved, err_msg, existing_gateways
def wait_for_status(client, wait_timeout, nat_gateway_id, status,
check_mode=False):
"""Wait for the NAT Gateway to reach a status
Args:
client (botocore.client.EC2): Boto3 client
wait_timeout (int): Number of seconds to wait, until this timeout is reached.
nat_gateway_id (str): The Amazon nat id.
status (str): The status to wait for.
examples. status=available, status=deleted
Basic Usage:
>>> client = boto3.client('ec2')
>>> subnet_id = 'subnet-12345678'
>>> allocation_id = 'eipalloc-12345678'
>>> wait_for_status(client, subnet_id, allocation_id)
[
true,
"",
{
"nat_gateway_id": "nat-123456789",
"subnet_id": "subnet-1234567",
"nat_gateway_addresses": [
{
"public_ip": "55.55.55.55",
"network_interface_id": "eni-1234567",
"private_ip": "10.0.0.102",
"allocation_id": "eipalloc-12345678"
}
],
"state": "deleted",
"create_time": "2016-03-05T00:33:21.209000+00:00",
"delete_time": "2016-03-05T00:36:37.329000+00:00",
"vpc_id": "vpc-12345677"
}
]
Returns:
Tuple (bool, str, dict)
"""
polling_increment_secs = 5
wait_timeout = time.time() + wait_timeout
status_achieved = False
nat_gateway = dict()
states = ['pending', 'failed', 'available', 'deleting', 'deleted']
err_msg = ""
while wait_timeout > time.time():
try:
gws_retrieved, err_msg, nat_gateways = (
get_nat_gateways(
client, nat_gateway_id=nat_gateway_id,
states=states, check_mode=check_mode
)
)
if gws_retrieved and nat_gateways:
nat_gateway = nat_gateways[0]
if check_mode:
nat_gateway['state'] = status
if nat_gateway.get('state') == status:
status_achieved = True
break
elif nat_gateway.get('state') == 'failed':
err_msg = nat_gateway.get('failure_message')
break
elif nat_gateway.get('state') == 'pending':
if 'failure_message' in nat_gateway:
err_msg = nat_gateway.get('failure_message')
status_achieved = False
break
else:
time.sleep(polling_increment_secs)
except botocore.exceptions.ClientError as e:
err_msg = str(e)
if not status_achieved:
err_msg = "Wait time out reached, while waiting for results"
return status_achieved, err_msg, nat_gateway
def gateway_in_subnet_exists(client, subnet_id, allocation_id=None,
check_mode=False):
"""Retrieve all NAT Gateways for a subnet.
Args:
subnet_id (str): The subnet_id the nat resides in.
Kwargs:
allocation_id (str): The EIP Amazon identifier.
default = None
Basic Usage:
>>> client = boto3.client('ec2')
>>> subnet_id = 'subnet-1234567'
>>> allocation_id = 'eipalloc-1234567'
>>> gateway_in_subnet_exists(client, subnet_id, allocation_id)
(
[
{
"nat_gateway_id": "nat-123456789",
"subnet_id": "subnet-123456789",
"nat_gateway_addresses": [
{
"public_ip": "55.55.55.55",
"network_interface_id": "eni-1234567",
"private_ip": "10.0.0.102",
"allocation_id": "eipalloc-1234567"
}
],
"state": "deleted",
"create_time": "2016-03-05T00:33:21.209000+00:00",
"delete_time": "2016-03-05T00:36:37.329000+00:00",
"vpc_id": "vpc-1234567"
}
],
False
)
Returns:
Tuple (list, bool)
"""
allocation_id_exists = False
gateways = []
states = ['available', 'pending']
gws_retrieved, _, gws = (
get_nat_gateways(
client, subnet_id, states=states, check_mode=check_mode
)
)
if not gws_retrieved:
return gateways, allocation_id_exists
for gw in gws:
for address in gw['nat_gateway_addresses']:
if allocation_id:
if address.get('allocation_id') == allocation_id:
allocation_id_exists = True
gateways.append(gw)
else:
gateways.append(gw)
return gateways, allocation_id_exists
def get_eip_allocation_id_by_address(client, eip_address, check_mode=False):
"""Release an EIP from your EIP Pool
Args:
client (botocore.client.EC2): Boto3 client
eip_address (str): The Elastic IP Address of the EIP.
Kwargs:
check_mode (bool): if set to true, do not run anything and
falsify the results.
Basic Usage:
>>> client = boto3.client('ec2')
>>> eip_address = '52.87.29.36'
>>> get_eip_allocation_id_by_address(client, eip_address)
'eipalloc-36014da3'
Returns:
Tuple (str, str)
"""
params = {
'PublicIps': [eip_address],
}
allocation_id = None
err_msg = ""
try:
if not check_mode:
allocations = client.describe_addresses(**params)['Addresses']
if len(allocations) == 1:
allocation = allocations[0]
else:
allocation = None
else:
dry_run_eip = (
DRY_RUN_ALLOCATION_UNCONVERTED['Addresses'][0]['PublicIp']
)
if dry_run_eip == eip_address:
allocation = DRY_RUN_ALLOCATION_UNCONVERTED['Addresses'][0]
else:
allocation = None
if allocation:
if allocation.get('Domain') != 'vpc':
err_msg = (
"EIP {0} is a non-VPC EIP, please allocate a VPC scoped EIP"
.format(eip_address)
)
else:
allocation_id = allocation.get('AllocationId')
else:
err_msg = (
"EIP {0} does not exist".format(eip_address)
)
except botocore.exceptions.ClientError as e:
err_msg = str(e)
return allocation_id, err_msg
def allocate_eip_address(client, check_mode=False):
"""Release an EIP from your EIP Pool
Args:
client (botocore.client.EC2): Boto3 client
Kwargs:
check_mode (bool): if set to true, do not run anything and
falsify the results.
Basic Usage:
>>> client = boto3.client('ec2')
>>> allocate_eip_address(client)
True
Returns:
Tuple (bool, str)
"""
ip_allocated = False
new_eip = None
err_msg = ''
params = {
'Domain': 'vpc',
}
try:
if check_mode:
ip_allocated = True
random_numbers = (
''.join(str(x) for x in random.sample(range(0, 9), 7))
)
new_eip = 'eipalloc-{0}'.format(random_numbers)
else:
new_eip = client.allocate_address(**params)['AllocationId']
ip_allocated = True
err_msg = 'eipalloc id {0} created'.format(new_eip)
except botocore.exceptions.ClientError as e:
err_msg = str(e)
return ip_allocated, err_msg, new_eip
def release_address(client, allocation_id, check_mode=False):
"""Release an EIP from your EIP Pool
Args:
client (botocore.client.EC2): Boto3 client
allocation_id (str): The eip Amazon identifier.
Kwargs:
check_mode (bool): if set to true, do not run anything and
falsify the results.
Basic Usage:
>>> client = boto3.client('ec2')
>>> allocation_id = "eipalloc-123456"
>>> release_address(client, allocation_id)
True
Returns:
Boolean, string
"""
err_msg = ''
if check_mode:
return True, ''
ip_released = False
try:
client.describe_addresses(AllocationIds=[allocation_id])
except botocore.exceptions.ClientError as e:
# IP address likely already released
# Happens with gateway in 'deleted' state that
# still lists associations
return True, str(e)
try:
client.release_address(AllocationId=allocation_id)
ip_released = True
except botocore.exceptions.ClientError as e:
err_msg = str(e)
return ip_released, err_msg
def create(client, subnet_id, allocation_id, client_token=None,
wait=False, wait_timeout=0, if_exist_do_not_create=False,
check_mode=False):
"""Create an Amazon NAT Gateway.
Args:
client (botocore.client.EC2): Boto3 client
subnet_id (str): The subnet_id the nat resides in.
allocation_id (str): The eip Amazon identifier.
Kwargs:
if_exist_do_not_create (bool): if a nat gateway already exists in this
            subnet, then do not create another one.
default = False
        wait (bool): Wait for the nat gateway to reach the available state before returning.
default = False
wait_timeout (int): Number of seconds to wait, until this timeout is reached.
default = 0
client_token (str):
default = None
Basic Usage:
>>> client = boto3.client('ec2')
>>> subnet_id = 'subnet-1234567'
>>> allocation_id = 'eipalloc-1234567'
>>> create(client, subnet_id, allocation_id, if_exist_do_not_create=True, wait=True, wait_timeout=500)
[
true,
"",
{
"nat_gateway_id": "nat-123456789",
"subnet_id": "subnet-1234567",
"nat_gateway_addresses": [
{
"public_ip": "55.55.55.55",
"network_interface_id": "eni-1234567",
"private_ip": "10.0.0.102",
"allocation_id": "eipalloc-1234567"
}
],
"state": "deleted",
"create_time": "2016-03-05T00:33:21.209000+00:00",
"delete_time": "2016-03-05T00:36:37.329000+00:00",
"vpc_id": "vpc-1234567"
}
]
Returns:
Tuple (bool, str, list)
"""
params = {
'SubnetId': subnet_id,
'AllocationId': allocation_id
}
request_time = datetime.datetime.utcnow()
changed = False
success = False
token_provided = False
err_msg = ""
if client_token:
token_provided = True
params['ClientToken'] = client_token
try:
if not check_mode:
result = client.create_nat_gateway(**params)["NatGateway"]
else:
result = DRY_RUN_GATEWAY_UNCONVERTED[0]
result['CreateTime'] = datetime.datetime.utcnow()
result['NatGatewayAddresses'][0]['AllocationId'] = allocation_id
result['SubnetId'] = subnet_id
success = True
changed = True
create_time = result['CreateTime'].replace(tzinfo=None)
if token_provided and (request_time > create_time):
changed = False
elif wait:
success, err_msg, result = (
wait_for_status(
client, wait_timeout, result['NatGatewayId'], 'available',
check_mode=check_mode
)
)
if success:
err_msg = (
'NAT gateway {0} created'.format(result['nat_gateway_id'])
)
except botocore.exceptions.ClientError as e:
if "IdempotentParameterMismatch" in e.message:
err_msg = (
'NAT Gateway does not support update and token has already been provided'
)
else:
err_msg = str(e)
success = False
changed = False
result = None
return success, changed, err_msg, result
def pre_create(client, subnet_id, allocation_id=None, eip_address=None,
if_exist_do_not_create=False, wait=False, wait_timeout=0,
client_token=None, check_mode=False):
"""Create an Amazon NAT Gateway.
Args:
client (botocore.client.EC2): Boto3 client
subnet_id (str): The subnet_id the nat resides in.
Kwargs:
allocation_id (str): The EIP Amazon identifier.
default = None
eip_address (str): The Elastic IP Address of the EIP.
default = None
if_exist_do_not_create (bool): if a nat gateway already exists in this
            subnet, then do not create another one.
default = False
        wait (bool): Wait for the nat gateway to reach the available state before returning.
default = False
wait_timeout (int): Number of seconds to wait, until this timeout is reached.
default = 0
client_token (str):
default = None
Basic Usage:
>>> client = boto3.client('ec2')
>>> subnet_id = 'subnet-w4t12897'
>>> allocation_id = 'eipalloc-36014da3'
>>> pre_create(client, subnet_id, allocation_id, if_exist_do_not_create=True, wait=True, wait_timeout=500)
[
true,
"",
{
"nat_gateway_id": "nat-03835afb6e31df79b",
"subnet_id": "subnet-w4t12897",
"nat_gateway_addresses": [
{
"public_ip": "52.87.29.36",
"network_interface_id": "eni-5579742d",
"private_ip": "10.0.0.102",
"allocation_id": "eipalloc-36014da3"
}
],
"state": "deleted",
"create_time": "2016-03-05T00:33:21.209000+00:00",
"delete_time": "2016-03-05T00:36:37.329000+00:00",
"vpc_id": "vpc-w68571b5"
}
]
Returns:
Tuple (bool, bool, str, list)
"""
success = False
changed = False
err_msg = ""
results = list()
if not allocation_id and not eip_address:
existing_gateways, allocation_id_exists = (
gateway_in_subnet_exists(client, subnet_id, check_mode=check_mode)
)
if len(existing_gateways) > 0 and if_exist_do_not_create:
success = True
changed = False
results = existing_gateways[0]
err_msg = (
'NAT Gateway {0} already exists in subnet_id {1}'
.format(
existing_gateways[0]['nat_gateway_id'], subnet_id
)
)
return success, changed, err_msg, results
else:
success, err_msg, allocation_id = (
allocate_eip_address(client, check_mode=check_mode)
)
if not success:
                return success, False, err_msg, dict()
elif eip_address or allocation_id:
if eip_address and not allocation_id:
allocation_id, err_msg = (
get_eip_allocation_id_by_address(
client, eip_address, check_mode=check_mode
)
)
if not allocation_id:
success = False
changed = False
return success, changed, err_msg, dict()
existing_gateways, allocation_id_exists = (
gateway_in_subnet_exists(
client, subnet_id, allocation_id, check_mode=check_mode
)
)
if len(existing_gateways) > 0 and (allocation_id_exists or if_exist_do_not_create):
success = True
changed = False
results = existing_gateways[0]
err_msg = (
'NAT Gateway {0} already exists in subnet_id {1}'
.format(
existing_gateways[0]['nat_gateway_id'], subnet_id
)
)
return success, changed, err_msg, results
success, changed, err_msg, results = create(
client, subnet_id, allocation_id, client_token,
wait, wait_timeout, if_exist_do_not_create, check_mode=check_mode
)
return success, changed, err_msg, results
def remove(client, nat_gateway_id, wait=False, wait_timeout=0,
release_eip=False, check_mode=False):
"""Delete an Amazon NAT Gateway.
Args:
client (botocore.client.EC2): Boto3 client
nat_gateway_id (str): The Amazon nat id.
Kwargs:
wait (bool): Wait for the nat to be in the deleted state before returning.
wait_timeout (int): Number of seconds to wait, until this timeout is reached.
release_eip (bool): Once the nat has been deleted, you can deallocate the eip from the vpc.
Basic Usage:
>>> client = boto3.client('ec2')
>>> nat_gw_id = 'nat-03835afb6e31df79b'
>>> remove(client, nat_gw_id, wait=True, wait_timeout=500, release_eip=True)
[
true,
"",
{
"nat_gateway_id": "nat-03835afb6e31df79b",
"subnet_id": "subnet-w4t12897",
"nat_gateway_addresses": [
{
"public_ip": "52.87.29.36",
"network_interface_id": "eni-5579742d",
"private_ip": "10.0.0.102",
"allocation_id": "eipalloc-36014da3"
}
],
"state": "deleted",
"create_time": "2016-03-05T00:33:21.209000+00:00",
"delete_time": "2016-03-05T00:36:37.329000+00:00",
"vpc_id": "vpc-w68571b5"
}
]
Returns:
Tuple (bool, str, list)
"""
params = {
'NatGatewayId': nat_gateway_id
}
success = False
changed = False
err_msg = ""
results = list()
    states = ['pending', 'available']
try:
exist, _, gw = (
get_nat_gateways(
client, nat_gateway_id=nat_gateway_id,
states=states, check_mode=check_mode
)
)
if exist and len(gw) == 1:
results = gw[0]
if not check_mode:
client.delete_nat_gateway(**params)
allocation_id = (
results['nat_gateway_addresses'][0]['allocation_id']
)
changed = True
success = True
err_msg = (
'NAT gateway {0} is in a deleting state. Delete was successful'
.format(nat_gateway_id)
)
if wait:
status_achieved, err_msg, results = (
wait_for_status(
client, wait_timeout, nat_gateway_id, 'deleted',
check_mode=check_mode
)
)
if status_achieved:
err_msg = (
'NAT gateway {0} was deleted successfully'
.format(nat_gateway_id)
)
except botocore.exceptions.ClientError as e:
err_msg = str(e)
if release_eip:
eip_released, eip_err = (
release_address(client, allocation_id, check_mode)
)
if not eip_released:
err_msg = (
"{0}: Failed to release EIP {1}: {2}"
.format(err_msg, allocation_id, eip_err)
)
success = False
return success, changed, err_msg, results
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
subnet_id=dict(type='str'),
eip_address=dict(type='str'),
allocation_id=dict(type='str'),
if_exist_do_not_create=dict(type='bool', default=False),
state=dict(default='present', choices=['present', 'absent']),
wait=dict(type='bool', default=False),
wait_timeout=dict(type='int', default=320, required=False),
release_eip=dict(type='bool', default=False),
nat_gateway_id=dict(type='str'),
client_token=dict(type='str'),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
mutually_exclusive=[
['allocation_id', 'eip_address']
]
)
# Validate Requirements
if not HAS_BOTO3:
module.fail_json(msg='botocore/boto3 is required.')
state = module.params.get('state').lower()
check_mode = module.check_mode
subnet_id = module.params.get('subnet_id')
allocation_id = module.params.get('allocation_id')
eip_address = module.params.get('eip_address')
nat_gateway_id = module.params.get('nat_gateway_id')
wait = module.params.get('wait')
wait_timeout = module.params.get('wait_timeout')
release_eip = module.params.get('release_eip')
client_token = module.params.get('client_token')
if_exist_do_not_create = module.params.get('if_exist_do_not_create')
try:
region, ec2_url, aws_connect_kwargs = (
get_aws_connection_info(module, boto3=True)
)
client = (
boto3_conn(
module, conn_type='client', resource='ec2',
region=region, endpoint=ec2_url, **aws_connect_kwargs
)
)
except botocore.exceptions.ClientError as e:
module.fail_json(msg="Boto3 Client Error - " + str(e.msg))
changed = False
err_msg = ''
if state == 'present':
if not subnet_id:
module.fail_json(msg='subnet_id is required for creation')
success, changed, err_msg, results = (
pre_create(
client, subnet_id, allocation_id, eip_address,
if_exist_do_not_create, wait, wait_timeout,
client_token, check_mode=check_mode
)
)
else:
if not nat_gateway_id:
module.fail_json(msg='nat_gateway_id is required for removal')
else:
success, changed, err_msg, results = (
remove(
client, nat_gateway_id, wait, wait_timeout, release_eip,
check_mode=check_mode
)
)
if not success:
module.fail_json(
msg=err_msg, success=success, changed=changed
)
else:
module.exit_json(
msg=err_msg, success=success, changed=changed, **results
)
if __name__ == '__main__':
main()
|
gpl-3.0
|
kevinmel2000/EmeraldBox
|
emerald/model_generator.py
|
2
|
5245
|
import os.path
import sys
from config import BASEDIR
from config import WHITE_SPACE
from config import SQLALCHEMY_MIGRATE_REPO
from database_operations import db_create, db_migrate
from controller_generator import generate_controller
from template_generator import generate_index_template
from template_generator import generate_controller_template
from template_generator import generate_edit_template
from template_generator import generate_view_template
def add_model(model_name, model_components):
# This is used to add model to the model file.
# Get the current model file and open it for writing.
model_path = os.path.join(BASEDIR, "app/models/" + model_name.lower() + ".py")
init_path = os.path.join(BASEDIR, "app/models/__init__.py")
model_file = open(model_path, 'w')
# Write the class definition.
model_file.write('from app import db\n\n')
model_file.write('class ' + model_name.title() + '(db.Model):\n')
model_file.write(WHITE_SPACE+'id = db.Column(db.Integer, primary_key=True)\n')
## Add the model fields.
### First check for the data types and standardize it.
for component in model_components:
in_type = component['field_property'][0].lower()
### The database field type based on http://docs.sqlalchemy.org/en/rel_0_7/core/types.html#types-generic.
if in_type == 'biginteger' or in_type == 'bigint':
data_type = 'BigInteger'
elif in_type=='int' or in_type=='integer':
data_type = 'Integer'
elif in_type == 'boolean':
data_type = 'Boolean'
elif in_type == 'date':
data_type = 'Date'
elif in_type == 'datetime':
data_type = 'DateTime'
elif in_type == 'enum':
data_type = 'Enum'
elif in_type == 'float':
data_type = 'Float'
elif in_type == 'interval':
data_type = 'Interval'
elif in_type == 'largebinary':
data_type = 'LargeBinary'
elif in_type == 'numeric':
data_type = 'Numeric'
elif in_type == 'pickletype':
data_type = 'PickleType'
elif in_type == 'schematype':
data_type = 'SchemaType'
elif in_type == 'smallinteger' or in_type == 'smallint':
data_type = 'SmallInteger'
elif in_type == 'string':
data_type = 'String'
elif in_type == 'text':
data_type = 'Text'
elif in_type == 'time':
data_type = 'Time'
elif in_type == 'unicode':
data_type = 'Unicode'
elif in_type == 'unicodetext':
data_type = 'UnicodeText'
elif in_type == 'binary':
data_type = 'Binary'
elif in_type == 'blob':
data_type = 'BLOB'
else:
### If the data type did not match any of the existing data types, display error message and quit the program.
print 'Data type ' + component['field_property'][0] + ' not found. Please refer to SQLAlchemy documentation for valid data types.'
sys.exit()
### If it matches write the model fields into the model files.
if len(component['field_property']) == 2:
model_file.write(WHITE_SPACE + component['field_name'].lower() + ' = db.Column(db.' + data_type + '(' + component['field_property'][1] + '))\n')
else:
model_file.write(WHITE_SPACE + component['field_name'].lower() + ' = db.Column(db.' + data_type + ')\n')
## Create the class method for data transfer object (dto) for JSON representation.
model_file.write('\n')
model_file.write(WHITE_SPACE + '# data transfer object to form JSON\n')
model_file.write(WHITE_SPACE + 'def dto(self):\n')
model_file.write(WHITE_SPACE + WHITE_SPACE + 'return dict(\n')
### Add the json component for all fields.
mod_counter = 1
model_file.write(WHITE_SPACE+WHITE_SPACE+WHITE_SPACE+'id = self.id,\n')
max_mod_index = len(model_components)
for component in model_components:
if mod_counter != max_mod_index:
model_file.write(WHITE_SPACE + WHITE_SPACE + WHITE_SPACE + component['field_name'].lower() + ' = self.' + component['field_name'].lower() + ',\n')
else:
model_file.write(WHITE_SPACE + WHITE_SPACE + WHITE_SPACE + component['field_name'].lower() + ' = self.' + component['field_name'].lower() + ')\n')
mod_counter = mod_counter + 1
model_file.close()
init_file = open(init_path, 'a')
init_file.write("from "+ model_name.lower() + " import " + model_name.title()+"\n")
init_file.close()
print '\n...........\n'
#add the CRUD controllers
generate_controller(model_name, model_components)
generate_index_template(model_name, model_components)
generate_controller_template(model_name, model_components)
generate_edit_template(model_name, model_components)
generate_view_template(model_name, model_components)
# perform the database creation and migration.
# this will be based on the state of the database.
if not os.path.exists(SQLALCHEMY_MIGRATE_REPO):
db_create()
db_migrate()
print "Please run box.py -m to complete the migration process"
# end of file
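# For illustration only (hypothetical input): calling
# add_model('book', [{'field_name': 'title', 'field_property': ['String', '128']},
#                    {'field_name': 'pages', 'field_property': ['Integer']}])
# writes roughly the following app/models/book.py:
#
#   from app import db
#
#   class Book(db.Model):
#       id = db.Column(db.Integer, primary_key=True)
#       title = db.Column(db.String(128))
#       pages = db.Column(db.Integer)
#
#       # data transfer object to form JSON
#       def dto(self):
#           return dict(
#               id = self.id,
#               title = self.title,
#               pages = self.pages)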
|
mit
|
openvapour/ryu
|
ryu/lib/packet/packet.py
|
9
|
4718
|
# Copyright (C) 2012 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import struct
from . import packet_base
from . import ethernet
class Packet(object):
"""A packet decoder/encoder class.
An instance is used to either decode or encode a single packet.
*data* is a bytearray to describe a raw datagram to decode.
    When decoding, a Packet object is iterable.
    Iterated values are protocol (ethernet, ipv4, ...) headers and the payload.
    Protocol headers are instances of subclasses of packet_base.PacketBase.
    The payload is a bytearray. Values are iterated in on-wire order.
*data* should be omitted when encoding a packet.
"""
def __init__(self, data=None, protocols=None, parse_cls=ethernet.ethernet):
super(Packet, self).__init__()
self.data = data
if protocols is None:
self.protocols = []
else:
self.protocols = protocols
if self.data:
self._parser(parse_cls)
def _parser(self, cls):
rest_data = self.data
while cls:
try:
proto, cls, rest_data = cls.parser(rest_data)
except struct.error:
break
if proto:
self.protocols.append(proto)
if rest_data:
self.protocols.append(rest_data)
def serialize(self):
"""Encode a packet and store the resulted bytearray in self.data.
This method is legal only when encoding a packet.
"""
self.data = bytearray()
r = self.protocols[::-1]
for i, p in enumerate(r):
if isinstance(p, packet_base.PacketBase):
if i == len(r) - 1:
prev = None
else:
prev = r[i + 1]
data = p.serialize(self.data, prev)
else:
data = str(p)
self.data = data + self.data
def add_protocol(self, proto):
"""Register a protocol *proto* for this packet.
This method is legal only when encoding a packet.
When encoding a packet, register a protocol (ethernet, ipv4, ...)
header to add to this packet.
Protocol headers should be registered in on-wire order before calling
self.serialize.
"""
self.protocols.append(proto)
def get_protocols(self, protocol):
"""Returns a list of protocols that matches to the specified protocol.
"""
if isinstance(protocol, packet_base.PacketBase):
protocol = protocol.__class__
assert issubclass(protocol, packet_base.PacketBase)
return [p for p in self.protocols if isinstance(p, protocol)]
def get_protocol(self, protocol):
"""Returns the firstly found protocol that matches to the
specified protocol.
"""
result = self.get_protocols(protocol)
if len(result) > 0:
return result[0]
return None
def __div__(self, trailer):
self.add_protocol(trailer)
return self
def __truediv__(self, trailer):
return self.__div__(trailer)
def __iter__(self):
return iter(self.protocols)
def __getitem__(self, idx):
return self.protocols[idx]
def __setitem__(self, idx, item):
self.protocols[idx] = item
def __delitem__(self, idx):
del self.protocols[idx]
def __len__(self):
return len(self.protocols)
def __contains__(self, protocol):
if (inspect.isclass(protocol) and
issubclass(protocol, packet_base.PacketBase)):
return protocol in [p.__class__ for p in self.protocols]
return protocol in self.protocols
def __str__(self):
return ', '.join(repr(protocol) for protocol in self.protocols)
__repr__ = __str__ # note: str(list) uses __repr__ for elements
# XXX: Hack for preventing recursive import
def _PacketBase__div__(self, trailer):
pkt = Packet()
pkt.add_protocol(self)
pkt.add_protocol(trailer)
return pkt
packet_base.PacketBase.__div__ = _PacketBase__div__
packet_base.PacketBase.__truediv__ = _PacketBase__div__
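# Minimal usage sketch (hypothetical values, not part of this module):
#
# >>> from ryu.lib.packet import packet, ethernet
# >>> pkt = packet.Packet(raw_data)              # decode a received frame
# >>> eth = pkt.get_protocol(ethernet.ethernet)
#
# >>> pkt = packet.Packet()                      # encode a new frame
# >>> pkt.add_protocol(ethernet.ethernet(dst='ff:ff:ff:ff:ff:ff'))
# >>> pkt.serialize()
# >>> wire_bytes = pkt.data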
|
apache-2.0
|
valintinr/opennode-knot
|
opennode/knot/model/machines.py
|
2
|
2123
|
from __future__ import absolute_import
from grokcore.component import context
from zope import schema
from zope.component import provideSubscriptionAdapter
from zope.interface import Interface, implements
from opennode.knot.model.compute import Compute
from opennode.knot.model.hangar import Hangar
from opennode.oms.model.model.actions import ActionsContainerExtension
from opennode.oms.model.model.base import Container, ContainerInjector, ReadonlyContainer
from opennode.oms.model.model.byname import ByNameContainerExtension
from opennode.oms.model.model.root import OmsRoot
from opennode.oms.security.directives import permissions
class Machines(Container):
__contains__ = Compute
__name__ = 'machines'
def __init__(self):
super(Machines, self).__init__()
def __str__(self):
return 'Machines list'
class MachinesRootInjector(ContainerInjector):
context(OmsRoot)
__class__ = Machines
class IIncomingMachineRequest(Interface):
hostname = schema.TextLine(title=u"Hostname", min_length=3)
class IncomingMachineRequest(ReadonlyContainer):
implements(IIncomingMachineRequest)
permissions(dict(hostname='read'))
def __init__(self, hostname):
self.__name__ = hostname
self.hostname = hostname
class IncomingMachines(ReadonlyContainer):
__name__ = 'incoming'
class IncomingMachinesInjector(ContainerInjector):
context(Machines)
__class__ = IncomingMachines
class BaseIncomingMachines(ReadonlyContainer):
"""Template method abstract class for stack-specific incoming machines list implementations"""
def _get(self):
""" Provide list of incoming host names """
        raise NotImplementedError
@property
def _items(self):
items = self._get()
pending = dict((h, IncomingMachineRequest(h)) for h in items)
return pending
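# Hypothetical stack-specific subclass (illustration only): implementations
# override _get() to return the pending host names; _items then wraps each
# name in an IncomingMachineRequest keyed by hostname.
#
# class ExampleIncomingMachines(BaseIncomingMachines):
#     __name__ = 'example'
#
#     def _get(self):
#         return ['node1.example.com', 'node2.example.com']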
class HangarMachinesInjector(ContainerInjector):
context(Machines)
__class__ = Hangar
provideSubscriptionAdapter(ByNameContainerExtension, adapts=(Machines, ))
provideSubscriptionAdapter(ActionsContainerExtension, adapts=(IncomingMachineRequest, ))
|
gpl-3.0
|
466152112/scikit-learn
|
sklearn/utils/tests/test_sparsefuncs.py
|
57
|
13752
|
import numpy as np
import scipy.sparse as sp
from scipy import linalg
from numpy.testing import assert_array_almost_equal, assert_array_equal
from sklearn.datasets import make_classification
from sklearn.utils.sparsefuncs import (mean_variance_axis,
inplace_column_scale,
inplace_row_scale,
inplace_swap_row, inplace_swap_column,
min_max_axis,
count_nonzero, csc_median_axis_0)
from sklearn.utils.sparsefuncs_fast import assign_rows_csr
from sklearn.utils.testing import assert_raises
def test_mean_variance_axis0():
X, _ = make_classification(5, 4, random_state=0)
# Sparsify the array a little bit
X[0, 0] = 0
X[2, 1] = 0
X[4, 3] = 0
X_lil = sp.lil_matrix(X)
X_lil[1, 0] = 0
X[1, 0] = 0
X_csr = sp.csr_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csr, axis=0)
assert_array_almost_equal(X_means, np.mean(X, axis=0))
assert_array_almost_equal(X_vars, np.var(X, axis=0))
X_csc = sp.csc_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csc, axis=0)
assert_array_almost_equal(X_means, np.mean(X, axis=0))
assert_array_almost_equal(X_vars, np.var(X, axis=0))
assert_raises(TypeError, mean_variance_axis, X_lil, axis=0)
X = X.astype(np.float32)
X_csr = X_csr.astype(np.float32)
X_csc = X_csc.astype(np.float32)
X_means, X_vars = mean_variance_axis(X_csr, axis=0)
assert_array_almost_equal(X_means, np.mean(X, axis=0))
assert_array_almost_equal(X_vars, np.var(X, axis=0))
X_means, X_vars = mean_variance_axis(X_csc, axis=0)
assert_array_almost_equal(X_means, np.mean(X, axis=0))
assert_array_almost_equal(X_vars, np.var(X, axis=0))
assert_raises(TypeError, mean_variance_axis, X_lil, axis=0)
def test_mean_variance_illegal_axis():
X, _ = make_classification(5, 4, random_state=0)
# Sparsify the array a little bit
X[0, 0] = 0
X[2, 1] = 0
X[4, 3] = 0
X_csr = sp.csr_matrix(X)
assert_raises(ValueError, mean_variance_axis, X_csr, axis=-3)
assert_raises(ValueError, mean_variance_axis, X_csr, axis=2)
assert_raises(ValueError, mean_variance_axis, X_csr, axis=-1)
def test_mean_variance_axis1():
X, _ = make_classification(5, 4, random_state=0)
# Sparsify the array a little bit
X[0, 0] = 0
X[2, 1] = 0
X[4, 3] = 0
X_lil = sp.lil_matrix(X)
X_lil[1, 0] = 0
X[1, 0] = 0
X_csr = sp.csr_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csr, axis=1)
assert_array_almost_equal(X_means, np.mean(X, axis=1))
assert_array_almost_equal(X_vars, np.var(X, axis=1))
X_csc = sp.csc_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csc, axis=1)
assert_array_almost_equal(X_means, np.mean(X, axis=1))
assert_array_almost_equal(X_vars, np.var(X, axis=1))
assert_raises(TypeError, mean_variance_axis, X_lil, axis=1)
X = X.astype(np.float32)
X_csr = X_csr.astype(np.float32)
X_csc = X_csc.astype(np.float32)
X_means, X_vars = mean_variance_axis(X_csr, axis=1)
assert_array_almost_equal(X_means, np.mean(X, axis=1))
assert_array_almost_equal(X_vars, np.var(X, axis=1))
X_means, X_vars = mean_variance_axis(X_csc, axis=1)
assert_array_almost_equal(X_means, np.mean(X, axis=1))
assert_array_almost_equal(X_vars, np.var(X, axis=1))
assert_raises(TypeError, mean_variance_axis, X_lil, axis=1)
def test_densify_rows():
X = sp.csr_matrix([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
rows = np.array([0, 2, 3], dtype=np.intp)
out = np.ones((rows.shape[0], X.shape[1]), dtype=np.float64)
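# assign_rows_csr densifies the CSR rows listed in `rows` into `out` at the given
# target indices; passing a reversed arange writes them into `out` in reverse order,
# which is what the assertion below checks.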
assign_rows_csr(X, rows,
np.arange(out.shape[0], dtype=np.intp)[::-1], out)
assert_array_equal(out, X[rows].toarray()[::-1])
def test_inplace_column_scale():
rng = np.random.RandomState(0)
X = sp.rand(100, 200, 0.05)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
scale = rng.rand(200)
XA *= scale
inplace_column_scale(Xc, scale)
inplace_column_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
X = X.astype(np.float32)
scale = scale.astype(np.float32)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
XA *= scale
inplace_column_scale(Xc, scale)
inplace_column_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
def test_inplace_row_scale():
rng = np.random.RandomState(0)
X = sp.rand(100, 200, 0.05)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
scale = rng.rand(100)
XA *= scale.reshape(-1, 1)
inplace_row_scale(Xc, scale)
inplace_row_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
X = X.astype(np.float32)
scale = scale.astype(np.float32)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
XA *= scale.reshape(-1, 1)
inplace_row_scale(Xc, scale)
inplace_row_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
def test_inplace_swap_row():
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
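# get_blas_funcs returns the BLAS `swap` routine matching X's dtype; swapping rows
# of the dense array keeps X in sync with the in-place swaps applied to the sparse
# copies below, so the arrays can be compared directly.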
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[0], X[-1] = swap(X[0], X[-1])
inplace_swap_row(X_csr, 0, -1)
inplace_swap_row(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[2], X[3] = swap(X[2], X[3])
inplace_swap_row(X_csr, 2, 3)
inplace_swap_row(X_csc, 2, 3)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_row, X_csr.tolil())
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[0], X[-1] = swap(X[0], X[-1])
inplace_swap_row(X_csr, 0, -1)
inplace_swap_row(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[2], X[3] = swap(X[2], X[3])
inplace_swap_row(X_csr, 2, 3)
inplace_swap_row(X_csc, 2, 3)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_row, X_csr.tolil())
def test_inplace_swap_column():
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[:, 0], X[:, -1] = swap(X[:, 0], X[:, -1])
inplace_swap_column(X_csr, 0, -1)
inplace_swap_column(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[:, 0], X[:, 1] = swap(X[:, 0], X[:, 1])
inplace_swap_column(X_csr, 0, 1)
inplace_swap_column(X_csc, 0, 1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_column, X_csr.tolil())
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[:, 0], X[:, -1] = swap(X[:, 0], X[:, -1])
inplace_swap_column(X_csr, 0, -1)
inplace_swap_column(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[:, 0], X[:, 1] = swap(X[:, 0], X[:, 1])
inplace_swap_column(X_csr, 0, 1)
inplace_swap_column(X_csc, 0, 1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_column, X_csr.tolil())
def test_min_max_axis0():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=0)
assert_array_equal(mins_csr, X.min(axis=0))
assert_array_equal(maxs_csr, X.max(axis=0))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=0)
assert_array_equal(mins_csc, X.min(axis=0))
assert_array_equal(maxs_csc, X.max(axis=0))
X = X.astype(np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=0)
assert_array_equal(mins_csr, X.min(axis=0))
assert_array_equal(maxs_csr, X.max(axis=0))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=0)
assert_array_equal(mins_csc, X.min(axis=0))
assert_array_equal(maxs_csc, X.max(axis=0))
def test_min_max_axis1():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=1)
assert_array_equal(mins_csr, X.min(axis=1))
assert_array_equal(maxs_csr, X.max(axis=1))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=1)
assert_array_equal(mins_csc, X.min(axis=1))
assert_array_equal(maxs_csc, X.max(axis=1))
X = X.astype(np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=1)
assert_array_equal(mins_csr, X.min(axis=1))
assert_array_equal(maxs_csr, X.max(axis=1))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=1)
assert_array_equal(mins_csc, X.min(axis=1))
assert_array_equal(maxs_csc, X.max(axis=1))
def test_min_max_axis_errors():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
assert_raises(TypeError, min_max_axis, X_csr.tolil(), axis=0)
assert_raises(ValueError, min_max_axis, X_csr, axis=2)
assert_raises(ValueError, min_max_axis, X_csc, axis=-3)
def test_count_nonzero():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
X_nonzero = X != 0
sample_weight = [.5, .2, .3, .1, .1]
X_nonzero_weighted = X_nonzero * np.array(sample_weight)[:, None]
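# With sample weights, each nonzero entry contributes its row's weight rather than 1,
# so the expected weighted counts are simply the sums of X_nonzero_weighted below.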
for axis in [0, 1, -1, -2, None]:
assert_array_almost_equal(count_nonzero(X_csr, axis=axis),
X_nonzero.sum(axis=axis))
assert_array_almost_equal(count_nonzero(X_csr, axis=axis,
sample_weight=sample_weight),
X_nonzero_weighted.sum(axis=axis))
assert_raises(TypeError, count_nonzero, X_csc)
assert_raises(ValueError, count_nonzero, X_csr, axis=2)
def test_csc_row_median():
# Test csc_row_median actually calculates the median.
# Test that it gives the same output when X is dense.
rng = np.random.RandomState(0)
X = rng.rand(100, 50)
dense_median = np.median(X, axis=0)
csc = sp.csc_matrix(X)
sparse_median = csc_median_axis_0(csc)
assert_array_equal(sparse_median, dense_median)
# Test that it gives the same output when X is sparse
X = rng.rand(51, 100)
X[X < 0.7] = 0.0
ind = rng.randint(0, 50, 10)
X[ind] = -X[ind]
csc = sp.csc_matrix(X)
dense_median = np.median(X, axis=0)
sparse_median = csc_median_axis_0(csc)
assert_array_equal(sparse_median, dense_median)
# Test for toy data.
X = [[0, -2], [-1, -1], [1, 0], [2, 1]]
csc = sp.csc_matrix(X)
assert_array_equal(csc_median_axis_0(csc), np.array([0.5, -0.5]))
X = [[0, -2], [-1, -5], [1, -3]]
csc = sp.csc_matrix(X)
assert_array_equal(csc_median_axis_0(csc), np.array([0., -3]))
# Test that it raises an Error for non-csc matrices.
assert_raises(TypeError, csc_median_axis_0, sp.csr_matrix(X))
|
bsd-3-clause
|
vsoch/myconnectome
|
myconnectome/rsfmri/mk_parcellation_boundaries.py
|
2
|
1478
|
"""
Make files marking the parcel boundaries from the parcellation.
"""
import numpy,nibabel
import nibabel.gifti.giftiio
import os
from myconnectome.utils import set_structure
basedir=os.environ['MYCONNECTOME_DIR']
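# MYCONNECTOME_DIR must be set in the environment; the parcellation GIFTI files are
# expected under <MYCONNECTOME_DIR>/parcellation/ (see the paths built below).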
def mk_parcellation_boundaries():
lh=os.path.join(basedir,'parcellation/all_selected_L_new_parcel_renumbered.func.gii')
rh=os.path.join(basedir,'parcellation/all_selected_R_new_parcel_renumbered.func.gii')
lhimg=nibabel.gifti.giftiio.read(lh)
rhimg=nibabel.gifti.giftiio.read(rh)
lhboundaries=numpy.zeros(lhimg.darrays[0].data.shape,dtype=numpy.float32)
lhboundaries[lhimg.darrays[0].data==0]=1
lhimg.darrays[0].data=lhboundaries
lhimg=nibabel.gifti.giftiio.write(lhimg,os.path.join(basedir,'parcellation/all_selected_L_new_parcel_renumbered_boundaries.func.gii'))
set_structure.set_structure(os.path.join(basedir,'parcellation/all_selected_L_new_parcel_renumbered_boundaries.func.gii'),'CORTEX_LEFT')
rhboundaries=numpy.zeros(rhimg.darrays[0].data.shape,dtype=numpy.float32)
rhboundaries[rhimg.darrays[0].data==0]=1
rhimg.darrays[0].data=rhboundaries
rhimg=nibabel.gifti.giftiio.write(rhimg,os.path.join(basedir,'parcellation/all_selected_R_new_parcel_renumbered_boundaries.func.gii'))
set_structure.set_structure(os.path.join(basedir,'parcellation/all_selected_R_new_parcel_renumbered_boundaries.func.gii'),'CORTEX_RIGHT')
if __name__ == "__main__":
mk_parcellation_boundaries()
|
mit
|
snnn/tensorflow
|
tensorflow/python/framework/device_test.py
|
71
|
6791
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.framework.device."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import device
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
class DeviceTest(test_util.TensorFlowTestCase):
def testEmpty(self):
d = device.DeviceSpec()
self.assertEquals("", d.to_string())
d.parse_from_string("")
self.assertEquals("", d.to_string())
def testConstructor(self):
d = device.DeviceSpec(job="j", replica=0, task=1,
device_type="CPU", device_index=2)
self.assertEqual("j", d.job)
self.assertEqual(0, d.replica)
self.assertEqual(1, d.task)
self.assertEqual("CPU", d.device_type)
self.assertEqual(2, d.device_index)
self.assertEqual("/job:j/replica:0/task:1/device:CPU:2", d.to_string())
d = device.DeviceSpec(device_type="GPU", device_index=0)
self.assertEquals("/device:GPU:0", d.to_string())
def testto_string(self):
d = device.DeviceSpec()
d.job = "foo"
self.assertEquals("/job:foo", d.to_string())
d.task = 3
self.assertEquals("/job:foo/task:3", d.to_string())
d.device_type = "CPU"
d.device_index = 0
self.assertEquals("/job:foo/task:3/device:CPU:0", d.to_string())
d.task = None
d.replica = 12
self.assertEquals("/job:foo/replica:12/device:CPU:0", d.to_string())
d.device_type = "GPU"
d.device_index = 2
self.assertEquals("/job:foo/replica:12/device:GPU:2", d.to_string())
d.device_type = "CPU"
d.device_index = 1
self.assertEquals("/job:foo/replica:12/device:CPU:1", d.to_string())
d.device_type = None
d.device_index = None
d.cpu = None
self.assertEquals("/job:foo/replica:12", d.to_string())
# Test wildcard
d = device.DeviceSpec(job="foo", replica=12, task=3, device_type="GPU")
self.assertEquals("/job:foo/replica:12/task:3/device:GPU:*", d.to_string())
def testParse(self):
d = device.DeviceSpec()
d.parse_from_string("/job:foo/replica:0")
self.assertEquals("/job:foo/replica:0", d.to_string())
d.parse_from_string("/replica:1/task:0/cpu:0")
self.assertEquals("/replica:1/task:0/device:CPU:0", d.to_string())
d.parse_from_string("/replica:1/task:0/device:CPU:0")
self.assertEquals("/replica:1/task:0/device:CPU:0", d.to_string())
d.parse_from_string("/job:muu/device:GPU:2")
self.assertEquals("/job:muu/device:GPU:2", d.to_string())
with self.assertRaises(Exception) as e:
d.parse_from_string("/job:muu/device:GPU:2/cpu:0")
self.assertTrue("Cannot specify multiple device" in str(e.exception))
def testFromString(self):
d = device.DeviceSpec.from_string("/job:foo/replica:0")
self.assertEquals("/job:foo/replica:0", d.to_string())
with self.assertRaises(Exception) as e:
d = device.DeviceSpec.from_string("/job:muu/device:GPU:2/cpu:0")
self.assertTrue("Cannot specify multiple device" in str(e.exception))
d = device.DeviceSpec.from_string("/job:foo/replica:0/task:3/cpu:*")
self.assertEquals(None, d.device_index)
d = device.DeviceSpec.from_string("/job:foo/replica:0/task:3/gpu:7")
self.assertEquals(7, d.device_index)
d = device.DeviceSpec.from_string("/job:foo/replica:0/task:3/device:GPU:7")
self.assertEquals(7, d.device_index)
def testMerge(self):
d = device.DeviceSpec.from_string("/job:foo/replica:0")
self.assertEquals("/job:foo/replica:0", d.to_string())
d.merge_from(device.DeviceSpec.from_string("/task:1/device:GPU:2"))
self.assertEquals("/job:foo/replica:0/task:1/device:GPU:2", d.to_string())
d = device.DeviceSpec()
d.merge_from(device.DeviceSpec.from_string("/task:1/cpu:0"))
self.assertEquals("/task:1/device:CPU:0", d.to_string())
d.merge_from(device.DeviceSpec.from_string("/job:boo/device:GPU:0"))
self.assertEquals("/job:boo/task:1/device:GPU:0", d.to_string())
d.merge_from(device.DeviceSpec.from_string("/job:muu/cpu:2"))
self.assertEquals("/job:muu/task:1/device:CPU:2", d.to_string())
d.merge_from(device.DeviceSpec.from_string(
"/job:muu/device:MyFunnyDevice:2"))
self.assertEquals("/job:muu/task:1/device:MyFunnyDevice:2", d.to_string())
def testCanonicalName(self):
self.assertEqual("/job:foo/replica:0",
device.canonical_name("/job:foo/replica:0"))
self.assertEqual("/job:foo/replica:0",
device.canonical_name("/replica:0/job:foo"))
self.assertEqual("/job:foo/replica:0/task:0",
device.canonical_name("/job:foo/replica:0/task:0"))
self.assertEqual("/job:foo/replica:0/task:0",
device.canonical_name("/job:foo/task:0/replica:0"))
self.assertEqual("/device:CPU:0",
device.canonical_name("/device:CPU:0"))
self.assertEqual("/device:GPU:2",
device.canonical_name("/device:GPU:2"))
self.assertEqual("/job:foo/replica:0/task:0/device:GPU:0",
device.canonical_name(
"/job:foo/replica:0/task:0/device:GPU:0"))
self.assertEqual("/job:foo/replica:0/task:0/device:GPU:0",
device.canonical_name(
"/device:GPU:0/task:0/replica:0/job:foo"))
def testCheckValid(self):
device.check_valid("/job:foo/replica:0")
with self.assertRaises(Exception) as e:
device.check_valid("/job:j/replica:foo")
self.assertTrue("invalid literal for int" in str(e.exception))
with self.assertRaises(Exception) as e:
device.check_valid("/job:j/task:bar")
self.assertTrue("invalid literal for int" in str(e.exception))
with self.assertRaises(Exception) as e:
device.check_valid("/bar:muu/baz:2")
self.assertTrue("Unknown attribute: 'bar'" in str(e.exception))
with self.assertRaises(Exception) as e:
device.check_valid("/cpu:0/device:GPU:2")
self.assertTrue("Cannot specify multiple device" in str(e.exception))
if __name__ == "__main__":
googletest.main()
|
apache-2.0
|
albertshift/omim
|
3party/protobuf/python/google/protobuf/internal/text_encoding_test.py
|
74
|
2858
|
#! /usr/bin/python
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for google.protobuf.text_encoding."""
from google.apputils import basetest
from google.protobuf import text_encoding
TEST_VALUES = [
("foo\\rbar\\nbaz\\t",
"foo\\rbar\\nbaz\\t",
b"foo\rbar\nbaz\t"),
("\\'full of \\\"sound\\\" and \\\"fury\\\"\\'",
"\\'full of \\\"sound\\\" and \\\"fury\\\"\\'",
b"'full of \"sound\" and \"fury\"'"),
("signi\\\\fying\\\\ nothing\\\\",
"signi\\\\fying\\\\ nothing\\\\",
b"signi\\fying\\ nothing\\"),
("\\010\\t\\n\\013\\014\\r",
"\x08\\t\\n\x0b\x0c\\r",
b"\010\011\012\013\014\015")]
class TextEncodingTestCase(basetest.TestCase):
def testCEscape(self):
for escaped, escaped_utf8, unescaped in TEST_VALUES:
self.assertEquals(escaped,
text_encoding.CEscape(unescaped, as_utf8=False))
self.assertEquals(escaped_utf8,
text_encoding.CEscape(unescaped, as_utf8=True))
def testCUnescape(self):
for escaped, escaped_utf8, unescaped in TEST_VALUES:
self.assertEquals(unescaped, text_encoding.CUnescape(escaped))
self.assertEquals(unescaped, text_encoding.CUnescape(escaped_utf8))
if __name__ == "__main__":
basetest.main()
|
apache-2.0
|
robwebset/script.tvtunes
|
plugin.py
|
1
|
17633
|
# -*- coding: utf-8 -*-
import sys
import os
import re
import urllib
import urlparse
import xbmc
import xbmcgui
import xbmcplugin
import xbmcaddon
import xbmcvfs
if sys.version_info < (2, 7):
import simplejson
else:
import json as simplejson
# Import the common settings
from resources.lib.settings import Settings
from resources.lib.settings import log
from resources.lib.settings import os_path_join
from resources.lib.settings import os_path_split
from resources.lib.settings import list_dir
from resources.lib.settings import normalize_string
from resources.lib.settings import dir_exists
from resources.lib.themeFinder import ThemeFiles
ADDON = xbmcaddon.Addon(id='script.tvtunes')
ICON = ADDON.getAddonInfo('icon')
FANART = ADDON.getAddonInfo('fanart')
###################################################################
# Class to handle the navigation information for the plugin
###################################################################
class MenuNavigator():
MOVIES = 'movies'
TVSHOWS = 'tvshows'
MUSICVIDEOS = 'musicvideos'
def __init__(self, base_url, addon_handle):
self.base_url = base_url
self.addon_handle = addon_handle
# Get the current state of the filter
currentSetting = xbmcgui.Window(12003).getProperty("TvTunes_BrowserMissingThemesOnly")
if currentSetting == "true":
self.missingThemesOnly = 1
else:
self.missingThemesOnly = 0
# Creates a URL for a directory
def _build_url(self, query):
return self.base_url + '?' + urllib.urlencode(query)
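# Example (illustrative values): _build_url({'mode': 'folder', 'foldername': 'movies'})
# returns "<base_url>?mode=folder&foldername=movies", where base_url is the plugin://
# URL Kodi invoked this add-on with (sys.argv[0]).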
# Display the default list of items in the root menu
def showRootMenu(self):
# Movies
url = self._build_url({'mode': 'folder', 'foldername': MenuNavigator.MOVIES})
li = xbmcgui.ListItem(ADDON.getLocalizedString(32201), iconImage=ICON)
li.setProperty("Fanart_Image", FANART)
li.addContextMenuItems([], replaceItems=True)
xbmcplugin.addDirectoryItem(handle=self.addon_handle, url=url, listitem=li, isFolder=True)
# TV Shows
url = self._build_url({'mode': 'folder', 'foldername': MenuNavigator.TVSHOWS})
li = xbmcgui.ListItem(ADDON.getLocalizedString(32202), iconImage=ICON)
li.setProperty("Fanart_Image", FANART)
li.addContextMenuItems([], replaceItems=True)
xbmcplugin.addDirectoryItem(handle=self.addon_handle, url=url, listitem=li, isFolder=True)
# Music Videos
url = self._build_url({'mode': 'folder', 'foldername': MenuNavigator.MUSICVIDEOS})
li = xbmcgui.ListItem(ADDON.getLocalizedString(32203), iconImage=ICON)
li.setProperty("Fanart_Image", FANART)
li.addContextMenuItems([], replaceItems=True)
xbmcplugin.addDirectoryItem(handle=self.addon_handle, url=url, listitem=li, isFolder=True)
# Add a blank line before the filters
li = xbmcgui.ListItem("", iconImage=ICON)
li.setProperty("Fanart_Image", FANART)
li.addContextMenuItems([], replaceItems=True)
xbmcplugin.addDirectoryItem(handle=self.addon_handle, url="", listitem=li, isFolder=False)
# Filter: Show only missing themes
url = self._build_url({'mode': 'filter', 'filtertype': 'MissingThemesOnly'})
filterTitle = " %s" % ADDON.getLocalizedString(32204)
li = xbmcgui.ListItem(filterTitle, iconImage=ICON)
li.setProperty("Fanart_Image", FANART)
li.setInfo('video', {'PlayCount': self.missingThemesOnly})
li.addContextMenuItems([], replaceItems=True)
xbmcplugin.addDirectoryItem(handle=self.addon_handle, url=url, listitem=li, isFolder=False)
xbmcplugin.endOfDirectory(self.addon_handle)
# Show the list of videos in a given set
def showFolder(self, foldername):
# Check for the special case of manually defined folders
if foldername == MenuNavigator.TVSHOWS:
self.setVideoList('GetTVShows', MenuNavigator.TVSHOWS)
elif foldername == MenuNavigator.MOVIES:
self.setVideoList('GetMovies', MenuNavigator.MOVIES)
elif foldername == MenuNavigator.MUSICVIDEOS:
self.setVideoList('GetMusicVideos', MenuNavigator.MUSICVIDEOS)
# Produce the list of videos and flag which ones have themes
def setVideoList(self, jsonGet, target):
videoItems = self.getVideos(jsonGet, target)
for videoItem in videoItems:
# Get the path where the theme should be stored
path = self.getPathForVideoItem(videoItem)
# Create the list-item for this video
li = xbmcgui.ListItem(videoItem['title'], iconImage=videoItem['thumbnail'])
# Remove the default context menu
li.addContextMenuItems([], replaceItems=True)
# Set the background image
if videoItem['fanart'] is not None:
li.setProperty("Fanart_Image", videoItem['fanart'])
# If theme already exists flag it using the play count
# This will normally put a tick on the GUI
if self._doesThemeExist(path):
# A theme already exists, see if we are showing only missing themes
if self.missingThemesOnly == 1:
# skip this theme
continue
li.setInfo('video', {'PlayCount': 1})
# Check the parent directory
elif Settings.isThemeDirEnabled() and self._doesThemeExist(path, True):
# The Theme directory is set, there is no theme in there
# but we have a theme that will play, so flag it
li.setProperty("ResumeTime", "50")
if videoItem['originaltitle'] is not None:
url = self._build_url({'mode': 'findtheme', 'foldername': target, 'path': path.encode("utf-8"), 'title': videoItem['title'].encode("utf-8"), 'isTvShow': videoItem['isTvShow'], 'year': videoItem['year'], 'imdb': videoItem['imdb'], 'originaltitle': videoItem['originaltitle'].encode("utf-8")})
else:
url = self._build_url({'mode': 'findtheme', 'foldername': target, 'path': path.encode("utf-8"), 'title': videoItem['title'].encode("utf-8"), 'isTvShow': videoItem['isTvShow'], 'year': videoItem['year'], 'imdb': videoItem['imdb']})
xbmcplugin.addDirectoryItem(handle=self.addon_handle, url=url, listitem=li, isFolder=False)
xbmcplugin.endOfDirectory(self.addon_handle)
# Do a lookup in the database for the given type of videos
def getVideos(self, jsonGet, target):
origTitleRequest = ', "imdbnumber", "originaltitle"'
if target == MenuNavigator.MUSICVIDEOS:
origTitleRequest = ''
json_query = xbmc.executeJSONRPC('{"jsonrpc": "2.0", "method": "VideoLibrary.%s", "params": {"properties": ["title", "file", "thumbnail", "fanart", "year"%s], "sort": { "method": "title" } }, "id": 1}' % (jsonGet, origTitleRequest))
# json_query = xbmc.executeJSONRPC('{"jsonrpc": "2.0", "method": "VideoLibrary.%s", "params": {"properties": ["title", "file", "thumbnail", "fanart", "imdbnumber", "year"%s], "sort": { "method": "title" } }, "id": 1}' % (jsonGet, origTitleRequest))
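# The JSON-RPC call above asks Kodi's video library for every item of the requested
# type with its title, file, thumbnail, fanart and year (plus imdbnumber and
# originaltitle, except for music videos), sorted by title.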
json_query = unicode(json_query, 'utf-8', errors='ignore')
json_response = simplejson.loads(json_query)
log(json_response)
Videolist = []
if ("result" in json_response) and (target in json_response['result']):
for item in json_response['result'][target]:
videoItem = {}
videoItem['title'] = item['title']
# The file is actually the path for a TV Show, the video file for movies
videoItem['file'] = item['file']
videoItem['year'] = item['year']
videoItem['isTvShow'] = False
if target == MenuNavigator.TVSHOWS:
videoItem['isTvShow'] = True
if item['thumbnail'] is None:
videoItem['thumbnail'] = 'DefaultFolder.png'
else:
videoItem['thumbnail'] = item['thumbnail']
videoItem['fanart'] = item['fanart']
if 'originaltitle' in item:
videoItem['originaltitle'] = item['originaltitle']
else:
videoItem['originaltitle'] = None
if 'imdbnumber' in item:
videoItem['imdb'] = item['imdbnumber']
else:
videoItem['imdb'] = None
Videolist.append(videoItem)
return Videolist
# Checks if a theme exists in a directory
def _doesThemeExist(self, directory, checkParent=False, incAudioThemes=True, incVideoThemes=True):
log("doesThemeExist: Checking directory: %s" % directory)
# Check for custom theme directory
if Settings.isThemeDirEnabled():
themeDir = os_path_join(directory, Settings.getThemeDirectory())
# Check if this directory exists
if not dir_exists(themeDir):
workingPath = directory
# If the path currently ends in the directory separator
# then we need to strip it off
if (workingPath[-1] == os.sep) or (workingPath[-1] == os.altsep):
workingPath = workingPath[:-1]
# If not check to see if we have a DVD VOB
if (os_path_split(workingPath)[1] == 'VIDEO_TS') or (os_path_split(workingPath)[1] == 'BDMV'):
# Check the parent of the DVD Dir
themeDir = os_path_split(workingPath)[0]
themeDir = os_path_join(themeDir, Settings.getThemeDirectory())
directory = themeDir
# Check to see if we need to check the parent directory
if checkParent:
directory = os_path_split(directory)[0]
# check if the directory exists before searching
if dir_exists(directory):
# Generate the regex
audioOnly = False
videoOnly = False
if not incAudioThemes:
videoOnly = True
if not incVideoThemes:
audioOnly = True
themeFileRegEx = Settings.getThemeFileRegEx(audioOnly=audioOnly, videoOnly=videoOnly)
dirs, files = list_dir(directory)
for aFile in files:
m = re.search(themeFileRegEx, aFile, re.IGNORECASE)
if m:
log("doesThemeExist: Found match: " + aFile)
return True
# Check if an NFO file exists
nfoFileName = os_path_join(directory, "tvtunes.nfo")
if xbmcvfs.exists(nfoFileName):
log("doesThemeExist: Found match: " + nfoFileName)
return True
return False
# Fetch a single theme
def fetchTheme(self, title, path, originaltitle=None, isTvShow=None, year=None, imdb=None):
# If there is already a theme then start playing it
self._startPlayingExistingTheme(path)
if Settings.isThemeDirEnabled() and self._doesThemeExist(path, True):
# Prompt user if we should move themes in the parent
# directory into the theme directory
moveExistingThemes = xbmcgui.Dialog().yesno(ADDON.getLocalizedString(32105), ADDON.getLocalizedString(32206), ADDON.getLocalizedString(32207))
# Check if we need to move a theme file
if moveExistingThemes:
log("fetchAllMissingThemes: Moving theme for %s" % title)
self._moveToThemeFolder(path)
# Stop playing any theme that started
self._stopPlayingTheme()
# Now reload the screen to reflect the change
xbmc.executebuiltin("Container.Refresh")
return
def _startPlayingExistingTheme(self, path):
log("startPlayingExistingTheme: Playing existing theme for %s" % path)
# Search for the themes
themeFiles = ThemeFiles(path)
if themeFiles.hasThemes():
playlist = themeFiles.getThemePlaylist()
# Stop playing any existing theme
self._stopPlayingTheme()
xbmc.Player().play(playlist)
else:
log("No themes found for %s" % path)
del themeFiles
def _stopPlayingTheme(self):
# Check if a tune is already playing
if xbmc.Player().isPlayingAudio():
xbmc.Player().stop()
while xbmc.Player().isPlayingAudio():
xbmc.sleep(5)
# Moves a theme that is not in a theme folder to a theme folder
def _moveToThemeFolder(self, directory):
log("moveToThemeFolder: path = %s" % directory)
# Handle the case where we have a disk image
if (os_path_split(directory)[1] == 'VIDEO_TS') or (os_path_split(directory)[1] == 'BDMV'):
directory = os_path_split(directory)[0]
dirs, files = list_dir(directory)
for aFile in files:
m = re.search(Settings.getThemeFileRegEx(directory), aFile, re.IGNORECASE)
if m:
srcpath = os_path_join(directory, aFile)
log("fetchAllMissingThemes: Found match: %s" % srcpath)
targetpath = os_path_join(directory, Settings.getThemeDirectory())
# Make sure the theme directory exists
if not dir_exists(targetpath):
try:
xbmcvfs.mkdir(targetpath)
except:
log("fetchAllMissingThemes: Failed to create directory: %s" % targetpath, True, xbmc.LOGERROR)
break
else:
log("moveToThemeFolder: directory already exists %s" % targetpath)
# Add the filename to the path
targetpath = os_path_join(targetpath, aFile)
if not xbmcvfs.rename(srcpath, targetpath):
log("moveToThemeFolder: Failed to move file from %s to %s" % (srcpath, targetpath))
# Searches for the path from a video item
def getPathForVideoItem(self, videoItem):
path = ""
# Get the path where the theme should be stored
if Settings.isCustomPathEnabled():
path = os_path_join(Settings.getCustomPath(), normalize_string(videoItem['title']))
else:
path = videoItem['file']
# Handle stacked files that have a custom file name format
if path.startswith("stack://"):
path = path.replace("stack://", "").split(" , ", 1)[0]
# Need to remove the filename from the end as we just want the directory
fileExt = os.path.splitext(path)[1]
# If this is a file, then get its parent directory
if fileExt is not None and fileExt != "":
path = os_path_split(path)[0]
return path
################################
# Main of the TvTunes Plugin
################################
if __name__ == '__main__':
# Get all the arguments
base_url = sys.argv[0]
addon_handle = int(sys.argv[1])
args = urlparse.parse_qs(sys.argv[2][1:])
# Record what the plugin deals with, files in our case
xbmcplugin.setContent(addon_handle, 'files')
# Get the current mode from the arguments, if none set, then use None
mode = args.get('mode', None)
log("TvTunesPlugin: Called with addon_handle = %d" % addon_handle)
# If None, then at the root
if mode is None:
log("TvTunesPlugin: Mode is NONE - showing root menu")
menuNav = MenuNavigator(base_url, addon_handle)
menuNav.showRootMenu()
del menuNav
elif mode[0] == 'folder':
log("TvTunesPlugin: Mode is FOLDER")
# Get the actual folder that was navigated to
foldername = args.get('foldername', None)
if (foldername is not None) and (len(foldername) > 0):
menuNav = MenuNavigator(base_url, addon_handle)
menuNav.showFolder(foldername[0])
del menuNav
elif mode[0] == 'findtheme':
log("TvTunesPlugin: Mode is FIND THEME")
# Get the actual title and path that was navigated to
title = args.get('title', None)
path = args.get('path', None)
originaltitle = args.get('originaltitle', None)
isTvShow = args.get('isTvShow', False)
year = args.get('year', None)
imdb = args.get('imdb', None)
if originaltitle is not None:
originaltitle = originaltitle[0]
if isTvShow is not None:
if isTvShow[0] in [False, 'False']:
isTvShow = False
else:
isTvShow = True
if year is not None:
year = year[0]
if imdb is not None:
imdb = imdb[0]
# Perform the fetch
menuNav = MenuNavigator(base_url, addon_handle)
menuNav.fetchTheme(title[0], path[0], originaltitle, isTvShow, year, imdb)
del menuNav
elif mode[0] == 'filter':
log("TvTunesPlugin: Mode is FILTER")
# Only one filter at the moment
# Get the current state of the filter
currentSetting = xbmcgui.Window(12003).getProperty("TvTunes_BrowserMissingThemesOnly")
if currentSetting == "true":
xbmcgui.Window(12003).clearProperty("TvTunes_BrowserMissingThemesOnly")
else:
xbmcgui.Window(12003).setProperty("TvTunes_BrowserMissingThemesOnly", "true")
# Now reload the screen to reflect the change
xbmc.executebuiltin("Container.Refresh")
|
gpl-2.0
|
HKUST-SING/tensorflow
|
tensorflow/contrib/distributions/python/kernel_tests/quantized_distribution_test.py
|
32
|
17752
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import stats
from tensorflow.contrib import distributions as distributions_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
distributions = distributions_lib
rng = np.random.RandomState(123)
class QuantizedDistributionTest(test.TestCase):
def _assert_all_finite(self, array):
self.assertTrue(np.isfinite(array).all())
def testQuantizationOfUniformWithCutoffsHavingNoEffect(self):
with self.test_session() as sess:
# The Quantized uniform with cutoffs == None divides the real line into:
# R = ...(-1, 0](0, 1](1, 2](2, 3](3, 4]...
# j = ... 0 1 2 3 4 ...
# Since this uniform (below) is supported on [0, 3],
# it places 1/3 of its mass in the intervals j = 1, 2, 3.
# Adding a cutoff at y = 0 changes the picture to
# R = ...(-inf, 0](0, 1](1, 2](2, 3](3, 4]...
# j = ... 0 1 2 3 4 ...
# So the QUniform still places 1/3 of its mass in the intervals
# j = 1, 2, 3.
# Adding a cutoff at y = 3 changes the picture to
# R = ...(-1, 0](0, 1](1, 2](2, inf)
# j = ... 0 1 2 3
# and the QUniform still places 1/3 of its mass in the intervals
# j = 1, 2, 3.
for lcut, ucut in [(None, None), (0.0, None), (None, 3.0), (0.0, 3.0),
(-10., 10.)]:
qdist = distributions.QuantizedDistribution(
distribution=distributions.Uniform(low=0.0, high=3.0),
low=lcut,
high=ucut)
# pmf
pmf_n1, pmf_0, pmf_1, pmf_2, pmf_3, pmf_4, pmf_5 = sess.run(
qdist.prob([-1., 0., 1., 2., 3., 4., 5.]))
# uniform had no mass below -1.
self.assertAllClose(0., pmf_n1)
# uniform had no mass below 0.
self.assertAllClose(0., pmf_0)
# uniform put 1/3 of its mass in each of (0, 1], (1, 2], (2, 3],
# which are the intervals j = 1, 2, 3.
self.assertAllClose(1 / 3, pmf_1)
self.assertAllClose(1 / 3, pmf_2)
self.assertAllClose(1 / 3, pmf_3)
# uniform had no mass in (3, 4] or (4, 5], which are j = 4, 5.
self.assertAllClose(0 / 3, pmf_4)
self.assertAllClose(0 / 3, pmf_5)
# cdf
cdf_n1, cdf_0, cdf_1, cdf_2, cdf_2p5, cdf_3, cdf_4, cdf_5 = sess.run(
qdist.cdf([-1., 0., 1., 2., 2.5, 3., 4., 5.]))
self.assertAllClose(0., cdf_n1)
self.assertAllClose(0., cdf_0)
self.assertAllClose(1 / 3, cdf_1)
self.assertAllClose(2 / 3, cdf_2)
# Note fractional values allowed for cdfs of discrete distributions.
# And adding 0.5 makes no difference because the quantized dist has
# mass only on the integers, never in between.
self.assertAllClose(2 / 3, cdf_2p5)
self.assertAllClose(3 / 3, cdf_3)
self.assertAllClose(3 / 3, cdf_4)
self.assertAllClose(3 / 3, cdf_5)
def testQuantizationOfUniformWithCutoffsInTheMiddle(self):
with self.test_session() as sess:
# The uniform is supported on [-3, 3]
# Consider partitions the real line in intervals
# ...(-3, -2](-2, -1](-1, 0](0, 1](1, 2](2, 3] ...
# Before cutoffs, the uniform puts a mass of 1/6 in each interval written
# above. Because of cutoffs, the qdist considers intervals and indices
# ...(-infty, -1](-1, 0](0, infty) ...
# -1 0 1
qdist = distributions.QuantizedDistribution(
distribution=distributions.Uniform(low=-3., high=3.),
low=-1.0,
high=1.0)
# pmf
cdf_n3, cdf_n2, cdf_n1, cdf_0, cdf_0p5, cdf_1, cdf_10 = sess.run(
qdist.cdf([-3., -2., -1., 0., 0.5, 1.0, 10.0]))
# Uniform had no mass on (-4, -3] or (-3, -2]
self.assertAllClose(0., cdf_n3)
self.assertAllClose(0., cdf_n2)
# Uniform had 1/6 of its mass in each of (-3, -2], and (-2, -1], which
# were collapsed into (-infty, -1], which is now the "-1" interval.
self.assertAllClose(1 / 3, cdf_n1)
# The j=0 interval contained mass from (-3, 0], which is 1/2 of the
# uniform's mass.
self.assertAllClose(1 / 2, cdf_0)
# Adding 0.5 makes no difference because the quantized dist has mass on
# the integers, not in between them.
self.assertAllClose(1 / 2, cdf_0p5)
# After applying the cutoff, all mass was either in the interval
# (0, infty), or below. (0, infty) is the interval indexed by j=1,
# so pmf(1) should equal 1.
self.assertAllClose(1., cdf_1)
# Since no mass of qdist is above 1,
# pmf(10) = P[Y <= 10] = P[Y <= 1] = pmf(1).
self.assertAllClose(1., cdf_10)
def testQuantizationOfBatchOfUniforms(self):
batch_shape = (5, 5)
with self.test_session():
# The uniforms are supported on [0, 10]. The qdist considers the
# intervals
# ... (0, 1](1, 2]...(9, 10]...
# with the intervals displayed above each holding 1 / 10 of the mass.
# The qdist will be defined with no cutoffs,
uniform = distributions.Uniform(
low=array_ops.zeros(batch_shape, dtype=dtypes.float32),
high=10 * array_ops.ones(batch_shape, dtype=dtypes.float32))
qdist = distributions.QuantizedDistribution(
distribution=uniform, low=None, high=None)
# x is random integers in {-3,...,12}.
x = rng.randint(-3, 13, size=batch_shape).astype(np.float32)
# pmf
# qdist.prob(j) = 1 / 10 for j in {1,...,10}, and 0 otherwise,
expected_pmf = (1 / 10) * np.ones(batch_shape)
expected_pmf[x < 1] = 0.
expected_pmf[x > 10] = 0.
self.assertAllClose(expected_pmf, qdist.prob(x).eval())
# cdf
# qdist.cdf(j)
# = 0 for j < 1
# = j / 10, for j in {1,...,10},
# = 1, for j > 10.
expected_cdf = x.copy() / 10
expected_cdf[x < 1] = 0.
expected_cdf[x > 10] = 1.
self.assertAllClose(expected_cdf, qdist.cdf(x).eval())
def testSamplingFromBatchOfNormals(self):
batch_shape = (2,)
with self.test_session():
normal = distributions.Normal(
loc=array_ops.zeros(
batch_shape, dtype=dtypes.float32),
scale=array_ops.ones(
batch_shape, dtype=dtypes.float32))
qdist = distributions.QuantizedDistribution(
distribution=normal, low=0., high=None)
samps = qdist.sample(5000, seed=42)
samps_v = samps.eval()
# With low = 0, the interval j=0 is (-infty, 0], which holds 1/2
# of the mass of the normals.
# rtol chosen to be 2x as large as necessary to pass.
self.assertAllClose([0.5, 0.5], (samps_v == 0).mean(axis=0), rtol=0.03)
# The interval j=1 is (0, 1], which is from the mean to one standard
# deviation out. This should contain 0.6827 / 2 of the mass.
self.assertAllClose(
[0.6827 / 2, 0.6827 / 2], (samps_v == 1).mean(axis=0), rtol=0.03)
def testSamplesAgreeWithCdfForSamplesOverLargeRange(self):
# Consider the cdf for distribution X, F(x).
# If U ~ Uniform[0, 1], then Y := F^{-1}(U) is distributed like X since
# P[Y <= y] = P[F^{-1}(U) <= y] = P[U <= F(y)] = F(y).
# If F is a bijection, we also have Z = F(X) is Uniform.
#
# Make an exponential with large mean (= 100). This ensures we will get
# quantized values over a large range. This large range allows us to
# pretend that the cdf F is a bijection, and hence F(X) is uniform.
# Note that F cannot be bijection since it is constant between the
# integers. Hence, F(X) (see below) will not be uniform exactly.
with self.test_session():
qdist = distributions.QuantizedDistribution(
distribution=distributions.Exponential(rate=0.01))
# X ~ QuantizedExponential
x = qdist.sample(10000, seed=42)
# Z = F(X), should be Uniform.
z = qdist.cdf(x)
# Compare the CDF of Z to that of a Uniform.
# dist = maximum distance between P[Z <= a] and P[U <= a].
# We ignore the p-value, since of course this distribution is not exactly
# uniform, and with so many sample points we would get a false fail.
dist, _ = stats.kstest(z.eval(), "uniform")
# Since the distribution take values (approximately) in [0, 100], the
# cdf should have jumps (approximately) every 1/100 of the way up.
# Assert that the jumps are not more than 2/100.
self.assertLess(dist, 0.02)
def testSamplesAgreeWithPdfForSamplesOverSmallRange(self):
# Testing that samples and pdf agree for a small range is important because
# it makes sure the bin edges are consistent.
# Make an exponential with mean 5.
with self.test_session():
qdist = distributions.QuantizedDistribution(
distribution=distributions.Exponential(rate=0.2))
# Standard error should be less than 1 / (2 * sqrt(n_samples))
n_samples = 10000
stddev_err_bound = 1 / (2 * np.sqrt(n_samples))
samps = qdist.sample((n_samples,), seed=42).eval()
# The smallest value the samples can take on is 1, which corresponds to
# the interval (0, 1]. Recall we use ceiling in the sampling definition.
self.assertLess(0.5, samps.min())
x_vals = np.arange(1, 11).astype(np.float32)
pmf_vals = qdist.prob(x_vals).eval()
for ii in range(10):
self.assertAllClose(
pmf_vals[ii], (samps == x_vals[ii]).mean(), atol=stddev_err_bound)
def testNormalCdfAndSurvivalFunction(self):
# At integer values, the result should be the same as the standard normal.
batch_shape = (3, 3)
mu = rng.randn(*batch_shape)
sigma = rng.rand(*batch_shape) + 1.0
with self.test_session():
qdist = distributions.QuantizedDistribution(
distribution=distributions.Normal(
loc=mu, scale=sigma))
sp_normal = stats.norm(mu, sigma)
x = rng.randint(-5, 5, size=batch_shape).astype(np.float64)
self.assertAllClose(sp_normal.cdf(x), qdist.cdf(x).eval())
self.assertAllClose(sp_normal.sf(x), qdist.survival_function(x).eval())
def testNormalLogCdfAndLogSurvivalFunction(self):
# At integer values, the result should be the same as the standard normal.
batch_shape = (3, 3)
mu = rng.randn(*batch_shape)
sigma = rng.rand(*batch_shape) + 1.0
with self.test_session():
qdist = distributions.QuantizedDistribution(
distribution=distributions.Normal(
loc=mu, scale=sigma))
sp_normal = stats.norm(mu, sigma)
x = rng.randint(-10, 10, size=batch_shape).astype(np.float64)
self.assertAllClose(sp_normal.logcdf(x), qdist.log_cdf(x).eval())
self.assertAllClose(
sp_normal.logsf(x), qdist.log_survival_function(x).eval())
def testNormalProbWithCutoffs(self):
# At integer values, the result should be the same as the standard normal.
with self.test_session():
qdist = distributions.QuantizedDistribution(
distribution=distributions.Normal(loc=0., scale=1.),
low=-2.,
high=2.)
sm_normal = stats.norm(0., 1.)
# These cutoffs create partitions of the real line, and indices:
# (-inf, -2](-2, -1](-1, 0](0, 1](1, inf)
# -2 -1 0 1 2
# Test interval (-inf, -2], <--> index -2.
self.assertAllClose(sm_normal.cdf(-2), qdist.prob(-2.).eval(), atol=0)
# Test interval (-2, -1], <--> index -1.
self.assertAllClose(
sm_normal.cdf(-1) - sm_normal.cdf(-2), qdist.prob(-1.).eval(), atol=0)
# Test interval (-1, 0], <--> index 0.
self.assertAllClose(
sm_normal.cdf(0) - sm_normal.cdf(-1), qdist.prob(0.).eval(), atol=0)
# Test interval (1, inf), <--> index 2.
self.assertAllClose(1. - sm_normal.cdf(1), qdist.prob(2.).eval(), atol=0)
def testNormalLogProbWithCutoffs(self):
# At integer values, the result should be the same as the standard normal.
with self.test_session():
qdist = distributions.QuantizedDistribution(
distribution=distributions.Normal(loc=0., scale=1.),
low=-2.,
high=2.)
sm_normal = stats.norm(0., 1.)
# These cutoffs create partitions of the real line, and indices:
# (-inf, -2](-2, -1](-1, 0](0, 1](1, inf)
# -2 -1 0 1 2
# Test interval (-inf, -2], <--> index -2.
self.assertAllClose(
np.log(sm_normal.cdf(-2)), qdist.log_prob(-2.).eval(), atol=0)
# Test interval (-2, -1], <--> index -1.
self.assertAllClose(
np.log(sm_normal.cdf(-1) - sm_normal.cdf(-2)),
qdist.log_prob(-1.).eval(),
atol=0)
# Test interval (-1, 0], <--> index 0.
self.assertAllClose(
np.log(sm_normal.cdf(0) - sm_normal.cdf(-1)),
qdist.log_prob(0.).eval(),
atol=0)
# Test interval (1, inf), <--> index 2.
self.assertAllClose(
np.log(1. - sm_normal.cdf(1)), qdist.log_prob(2.).eval(), atol=0)
def testLogProbAndGradGivesFiniteResults(self):
for dtype in [np.float32, np.float64]:
g = ops.Graph()
with g.as_default():
mu = variables.Variable(0., name="mu", dtype=dtype)
sigma = variables.Variable(1., name="sigma", dtype=dtype)
qdist = distributions.QuantizedDistribution(
distribution=distributions.Normal(
loc=mu, scale=sigma))
x = np.arange(-100, 100, 2).astype(dtype)
proba = qdist.log_prob(x)
grads = gradients_impl.gradients(proba, [mu, sigma])
with self.test_session(graph=g):
variables.global_variables_initializer().run()
self._assert_all_finite(proba.eval())
self._assert_all_finite(grads[0].eval())
self._assert_all_finite(grads[1].eval())
def testProbAndGradGivesFiniteResultsForCommonEvents(self):
with self.test_session():
mu = variables.Variable(0.0, name="mu")
sigma = variables.Variable(1.0, name="sigma")
qdist = distributions.QuantizedDistribution(
distribution=distributions.Normal(
loc=mu, scale=sigma))
x = math_ops.ceil(4 * rng.rand(100).astype(np.float32) - 2)
variables.global_variables_initializer().run()
proba = qdist.prob(x)
self._assert_all_finite(proba.eval())
grads = gradients_impl.gradients(proba, [mu, sigma])
self._assert_all_finite(grads[0].eval())
self._assert_all_finite(grads[1].eval())
def testLowerCutoffMustBeBelowUpperCutoffOrWeRaise(self):
with self.test_session():
qdist = distributions.QuantizedDistribution(
distribution=distributions.Normal(loc=0., scale=1.),
low=1., # not strictly less than high.
high=1.,
validate_args=True)
self.assertTrue(qdist.validate_args) # Default is True.
with self.assertRaisesOpError("must be strictly less"):
qdist.sample().eval()
def testCutoffsMustBeIntegerValuedIfValidateArgsTrue(self):
with self.test_session():
qdist = distributions.QuantizedDistribution(
distribution=distributions.Normal(loc=0., scale=1.),
low=1.5,
high=10.,
validate_args=True)
self.assertTrue(qdist.validate_args) # Default is True.
with self.assertRaisesOpError("has non-integer components"):
qdist.sample().eval()
def testCutoffsCanBeFloatValuedIfValidateArgsFalse(self):
with self.test_session():
qdist = distributions.QuantizedDistribution(
distribution=distributions.Normal(
loc=0., scale=1., validate_args=False),
low=1.5,
high=10.11)
self.assertFalse(qdist.validate_args) # Default is True.
# Should not raise
qdist.sample().eval()
def testDtypeAndShapeInheritedFromBaseDist(self):
batch_shape = (2, 3)
with self.test_session():
qdist = distributions.QuantizedDistribution(
distribution=distributions.Normal(
loc=array_ops.zeros(batch_shape),
scale=array_ops.zeros(batch_shape)),
low=1.0,
high=10.0)
self.assertEqual(batch_shape, qdist.batch_shape)
self.assertAllEqual(batch_shape, qdist.batch_shape_tensor().eval())
self.assertEqual((), qdist.event_shape)
self.assertAllEqual((), qdist.event_shape_tensor().eval())
samps = qdist.sample(10, seed=42)
self.assertEqual((10,) + batch_shape, samps.get_shape())
self.assertAllEqual((10,) + batch_shape, samps.eval().shape)
y = rng.randint(0, 5, size=batch_shape).astype(np.float32)
self.assertEqual(batch_shape, qdist.prob(y).get_shape())
if __name__ == "__main__":
test.main()
|
apache-2.0
|
Karel-van-de-Plassche/bokeh
|
bokeh/command/subcommands/tests/test_json.py
|
13
|
3465
|
from __future__ import absolute_import
import argparse
import pytest
import os
import sys
is_python2 = sys.version_info[0] == 2
import bokeh.command.subcommands.json as scjson
from bokeh.command.bootstrap import main
from bokeh.util.testing import TmpDir, WorkingDir, with_directory_contents
from . import basic_scatter_script
def test_create():
import argparse
from bokeh.command.subcommand import Subcommand
obj = scjson.JSON(parser=argparse.ArgumentParser())
assert isinstance(obj, Subcommand)
def test_name():
assert scjson.JSON.name == "json"
def test_help():
assert scjson.JSON.help == "Create JSON files for one or more applications"
def test_args():
assert scjson.JSON.args == (
('files', dict(
metavar='DIRECTORY-OR-SCRIPT',
nargs='+',
help="The app directories or scripts to generate JSON for",
default=None
)),
('--indent', dict(
metavar='LEVEL',
type=int,
help="indentation to use when printing",
default=None
)),
(('-o', '--output'), dict(
metavar='FILENAME',
action='append',
type=str,
help="Name of the output file or - for standard output."
)),
('--args', dict(
metavar='COMMAND-LINE-ARGS',
nargs=argparse.REMAINDER,
help="Any command line arguments remaining are passed on to the application handler",
)),
)
def test_no_script(capsys):
with (TmpDir(prefix="bokeh-json-no-script")) as dirname:
with WorkingDir(dirname):
with pytest.raises(SystemExit):
main(["bokeh", "json"])
out, err = capsys.readouterr()
if is_python2:
too_few = "too few arguments"
else:
too_few = "the following arguments are required: DIRECTORY-OR-SCRIPT"
assert err == """usage: bokeh json [-h] [--indent LEVEL] [-o FILENAME] [--args ...]
DIRECTORY-OR-SCRIPT [DIRECTORY-OR-SCRIPT ...]
bokeh json: error: %s
""" % (too_few)
assert out == ""
def test_basic_script(capsys):
def run(dirname):
with WorkingDir(dirname):
main(["bokeh", "json", "scatter.py"])
out, err = capsys.readouterr()
assert err == ""
assert out == ""
assert set(["scatter.json", "scatter.py"]) == set(os.listdir(dirname))
with_directory_contents({ 'scatter.py' : basic_scatter_script },
run)
def test_basic_script_with_output_after(capsys):
def run(dirname):
with WorkingDir(dirname):
main(["bokeh", "json", "scatter.py", "--output", "foo.json"])
out, err = capsys.readouterr()
assert err == ""
assert out == ""
assert set(["foo.json", "scatter.py"]) == set(os.listdir(dirname))
with_directory_contents({ 'scatter.py' : basic_scatter_script },
run)
def test_basic_script_with_output_before(capsys):
def run(dirname):
with WorkingDir(dirname):
main(["bokeh", "json", "--output", "foo.json", "scatter.py"])
out, err = capsys.readouterr()
assert err == ""
assert out == ""
assert set(["foo.json", "scatter.py"]) == set(os.listdir(dirname))
with_directory_contents({ 'scatter.py' : basic_scatter_script },
run)
|
bsd-3-clause
|
brandjon/simplestruct
|
examples/abstract.py
|
1
|
1261
|
"""Demonstrates how to combine Struct with abstract base classes."""
from abc import ABCMeta, abstractmethod
from simplestruct import Struct, Field, MetaStruct
# A simple ABC. Subclasses must provide an override for foo().
class Abstract(metaclass=ABCMeta):
@abstractmethod
def foo(self):
pass
# ABCs rely on a metaclass that conflicts with Struct's metaclass.
try:
class Concrete(Abstract, Struct):
f = Field
def foo(self):
return self.f ** 2
except TypeError as e:
print(e)
# metaclass conflict: the metaclass of a derived class must
# be a (non-strict) subclass of the metaclasses of all its bases
# So let's make a trivial subclass of ABCMeta and MetaStruct.
class ABCMetaStruct(MetaStruct, ABCMeta):
pass
class Concrete(Abstract, Struct, metaclass=ABCMetaStruct):
f = Field
def foo(self):
return self.f ** 2
c = Concrete(5)
print(c.foo()) # 25
# For convenience we can make a version of Struct that
# incorporates the common metaclass.
class ABCStruct(Struct, metaclass=ABCMetaStruct):
pass
# Now we only have to do:
class Concrete(Abstract, ABCStruct):
f = Field
def foo(self):
return self.f ** 2
c = Concrete(5)
print(c.foo()) # 25
|
mit
|
aravindvenkatesan/AgroLD-scripts
|
AgroLD_ETL/riceKB/TropgeneModel.py
|
1
|
8886
|
import pprint
import re
import os
from globalVars import base_vocab_ns
from TropgeneParser import *
__author__ = 'elhassouni'
def tropGeneToRDF(tropGene_map, output_file):
# The different variable declarations
tropGene_buffer = '' # initialise the buffer to an empty string
population_counter, mapefeature_counter, study_counter, qtl_counter, trait_counter = 0, 0, 0, 0, 0
rdf_writer = open(output_file, "w")
study_list = list()
qtl_list = list()
population_list = list()
# The first thing written to the file is the set of prefix declarations
print ("*************TropGene RDF conversion begins***********\n")
rdf_writer.write(base + "\t" + "<" + base_uri + "> .\n")
rdf_writer.write(pr + "\t" + rdf_ns + "<" + rdf + "> .\n")
rdf_writer.write(pr + "\t" + rdfs_ns + "<" + rdfs + "> .\n")
rdf_writer.write(pr + "\t" + xsd_ns + "<" + xsd + "> .\n")
rdf_writer.write(pr + "\t" + owl_ns + "<" + owl + "> .\n")
rdf_writer.write(pr + "\t" + base_vocab_ns + "<" + base_vocab_uri + "> .\n")
rdf_writer.write(pr + "\t" + study_ns + "<" + study_uri + "> .\n")
rdf_writer.write(pr + "\t" + population_ns + "<" + population_uri + "> .\n")
rdf_writer.write(pr + "\t" + qtl_ns + "<" + qtl_uri + "> .\n")
rdf_writer.write(pr + "\t" + obo_ns + "<" + obo_uri + "> .\n")
rdf_writer.write(pr + "\t" + edam_ns + "<" + edam_uri + "> .\n")
rdf_writer.write(pr + "\t" + trait_ns + "<" + trait_uri + "> .\n")
rdf_writer.write(pr + "\t" + mapfeature_ns + "<" + mapfeature_uri + "> .\n")
# Study writing: browse the records and write the study resources
for records in tropGene_map:
if not records['study_id'] in study_list:
study_counter += 1
study_list.append(records['study_id'])
#print(study_list)
study_id_now = records['study_id']
tropGene_buffer += study_ns + records['study_id'] + "\n"
tropGene_buffer += "\t" + rdfs_ns + "label" + "\t" + " \"" + records['study_name'] + "\" ;\n"
tropGene_buffer += "\t" + rdf_ns + "type" + "\t" + owl_ns + "Class" + " ;\n"
tropGene_buffer += "\t" + rdfs_ns + "subClassOf" + "\t" + obo_ns + "OBI_0000073" + " ;\n"
for records in tropGene_map:
if records['study_id'] == study_id_now:
tropGene_buffer += "\t" + base_vocab_ns + "has_observation" + "\t" + qtl_ns + records['qtl_id'] + " ;\n"
for records in tropGene_map:
if records['study_id'] == study_id_now and not records['population_id'] in population_list:
population_list.append(records['population_id'])
tropGene_buffer += "\t" + base_vocab_ns + "has_population" + "\t" + population_ns + records['population_id'] + " ;\n"
tropGene_buffer = re.sub(' ;$', ' .', tropGene_buffer)
rdf_writer.write(tropGene_buffer)
tropGene_buffer = '' # reset the buffer at zero
population_list = list()
print(tropGene_buffer)
    # Population writing: here we browse the records to write the population resources
    tropGene_buffer = '' # reset the buffer to an empty string
for records in tropGene_map:
if not records['population_id'] in population_list:
if records['study_id'] in study_list:
population_list.append(records['population_id'])
population_counter += 1
tropGene_buffer += population_ns + records['population_id'] + " \n"
tropGene_buffer += "\t" + rdf_ns + "type" + "\t" + owl_ns + "Class" + " ;\n"
tropGene_buffer += "\t" + rdfs_ns + "subClassOf" + "\t" + obo_ns + "OBI_0000181" + " ;\n"
tropGene_buffer += "\t" + rdfs_ns + "label" + "\t" + " \"" + records['population_name'] + "\" .\n"
rdf_writer.write(tropGene_buffer)
print(tropGene_buffer)
    # QTL writing: here we browse the records to write the QTL resources
    # (only the QTLs referenced by the study resources above)
    tropGene_buffer = '' # reset the buffer to an empty string
for records in tropGene_map:
if records['study_id'] in study_list:
qtl_counter += 1
qtl_id_now = records['qtl_id']
tropGene_buffer += qtl_ns + records['qtl_id'] + "\n"
tropGene_buffer += "\t" + rdfs_ns + "label" + "\t" + " \""+ records['qtl_name'] + "\" ;\n"
tropGene_buffer += "\t" + rdf_ns + "type" + "\t" + owl_ns + "Class" + " ;\n"
tropGene_buffer += "\t" + rdfs_ns + "subClassOf" + "\t" + obo_ns + "SO_0000771" + " ;\n" # a definir correctement
for records in tropGene_map:
if records['qtl_id'] == qtl_id_now:
                    if records['trait_ontology_id'] in ('NULL', 'na'): # no ontology id available, fall back to the TropGene trait id
tropGene_buffer += "\t" + base_vocab_ns + "has_trait" + "\t" + trait_ns + records['trait_id_of_tropgene'] + " ;\n"
else:
tropGene_buffer += "\t" + base_vocab_ns + "has_trait" + "\t" + trait_ns + records['trait_ontology_id'] + " ;\n"
tropGene_buffer += "\t" + base_vocab_ns + "has_mapfeature" + "\t" + mapfeature_ns + records['mapfeature_id'] + " .\n"
rdf_writer.write(tropGene_buffer)
print(tropGene_buffer)
    # Mapfeature writing: here we browse the records to write the map feature resources
    # (only the map features referenced by the QTL resources above)
    tropGene_buffer = '' # reset the buffer to an empty string
for records in tropGene_map:
if not records['mapfeature_id'] in tropGene_buffer and records['study_id'] in study_list:
mapefeature_counter += 1
tropGene_buffer += mapfeature_ns + records['mapfeature_id'] + "\n"
tropGene_buffer += "\t" + rdf_ns + "type" + "\t" + owl_ns + "Class" + " ;\n"
tropGene_buffer += "\t" + rdfs_ns + "subClassOf" + "\t" + edam_ns + "data_1865" + " ;\n"
tropGene_buffer += "\t" + base_vocab_ns + "is_located_on" + "\t" + " \"" + records['chromosome'] + "\" ;\n"
tropGene_buffer += "\t" + base_vocab_ns + "has_start_position" + "\t" + " \"" + records['start_position'] + "\"^^xsd:integer ;\n"
tropGene_buffer += "\t" + base_vocab_ns + "has_end_position" + "\t" + " \"" + records['stop_position'] + "\"^^xsd:integer .\n"
rdf_writer.write(tropGene_buffer)
print(tropGene_buffer)
    # Trait writing: here we browse the records to write the trait resources
    # (only the traits referenced by the QTL resources above)
    tropGene_buffer = '' # reset the buffer to an empty string
for records in tropGene_map:
        if not records['trait_ontology_id'] or records['trait_id_of_tropgene'] not in tropGene_buffer:
if records['study_id'] in study_list:
trait_counter += 1
                if records['trait_ontology_id'] in ('NULL', 'na'): # no ontology id available, use the TropGene trait id
tropGene_buffer += trait_ns + records['trait_id_of_tropgene'] + "\n"
else:
tropGene_buffer += trait_ns + records['trait_ontology_id'] + "\n"
tropGene_buffer += "\t" + rdfs_ns + "label" + "\t" + " \"" + records['trait_name'] + "\" ;\n"
tropGene_buffer += "\t" + rdf_ns + "type" + "\t" + owl_ns + "Class" + " ;\n"
if records['trait_description'] != 'NULL':
tropGene_buffer += "\t" + base_vocab_ns + "description" + "\t" + " \"" + records['trait_description'] + "\" ;\n"
tropGene_buffer += "\t" + rdfs_ns + "subClassOf" + "\t" + obo_ns + "TO_0000387" + " .\n"
rdf_writer.write(tropGene_buffer)
print(tropGene_buffer)
rdf_writer.close()
print("************* TropGene RDF conversion finish ***********\n")
    # Some checks to report the number of each resource type written
    QTL = "Number of QTL :" + str(qtl_counter)
print(QTL)
STUDY = "Number of STUDY :" + str(study_counter)
print(STUDY)
MAPFEATURE = "Number of mapfeature :" + str(mapefeature_counter)
print(MAPFEATURE)
TRAIT = "Number of trait :" + str(trait_counter)
print(TRAIT)
POPULATION = "Number of population :" + str(population_counter)
print(POPULATION)
'''
# ---------------------------------------------------------------------------------------------------------
# Test for parsing the data and transforming it to RDF
# ---------------------------------------------------------------------------------------------------------
pp = pprint.PrettyPrinter(indent=4)
#
path = '/media/elhassouni/donnees/Noeud-plante-projet/code-source/test_files/tropgene/rice.csv' # The input
path_output = '/home/elhassouni/Bureau/Tropgene.ttl' # The output
ds = tropGeneParser(path) # Parse the file with tropGeneParser()
pp.pprint(ds) # Print the parsed result to the terminal
tropGeneToRDF(ds, path_output) # The transformation function tropGeneToRDF(input, output)
# ---------------------------------------------------------------------------------------------------------'' \
'''
|
cc0-1.0
|
mvidalgarcia/indico
|
indico/modules/events/persons/util.py
|
2
|
2965
|
# This file is part of Indico.
# Copyright (C) 2002 - 2019 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
from indico.modules.events.models.persons import EventPerson
from indico.modules.users import User
from indico.modules.users.models.users import UserTitle
from indico.util.user import principal_from_fossil
def create_event_person(event, create_untrusted_persons=False, **data):
"""Create an event person from data passed as kwargs."""
title = next((x.value for x in UserTitle if data.get('title') == x.title), None)
return EventPerson(event=event, email=data.get('email', '').lower(), _title=title,
first_name=data.get('firstName'), last_name=data['familyName'],
affiliation=data.get('affiliation'), address=data.get('address'),
phone=data.get('phone'), is_untrusted=create_untrusted_persons)
def get_event_person_for_user(event, user, create_untrusted_persons=False):
"""Return the event person that links to a given User/Event (if any)."""
return EventPerson.for_user(user, event, is_untrusted=create_untrusted_persons)
def get_event_person(event, data, create_untrusted_persons=False, allow_external=False, allow_emails=False,
allow_networks=False):
"""Get an EventPerson from dictionary data.
If there is already an event person in the same event and for the same user,
it will be returned. Matching is done with the e-mail.
"""
person_type = data.get('_type')
if person_type is None:
if data.get('email'):
email = data['email'].lower()
user = User.query.filter(~User.is_deleted, User.all_emails == email).first()
if user:
return get_event_person_for_user(event, user, create_untrusted_persons=create_untrusted_persons)
elif event:
person = event.persons.filter_by(email=email).first()
if person:
return person
# We have no way to identify an existing event person with the provided information
return create_event_person(event, create_untrusted_persons=create_untrusted_persons, **data)
elif person_type == 'Avatar':
# XXX: existing_data
principal = principal_from_fossil(data, allow_pending=allow_external, allow_emails=allow_emails,
allow_networks=allow_networks)
return get_event_person_for_user(event, principal, create_untrusted_persons=create_untrusted_persons)
elif person_type == 'EventPerson':
return event.persons.filter_by(id=data['id']).one()
elif person_type == 'PersonLink':
return event.persons.filter_by(id=data['personId']).one()
else:
raise ValueError("Unknown person type '{}'".format(person_type))
|
mit
|
ds-hwang/chromium-crosswalk
|
third_party/protobuf/python/google/protobuf/internal/text_format_test.py
|
162
|
23727
|
#! /usr/bin/python
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Test for google.protobuf.text_format."""
__author__ = '[email protected] (Kenton Varda)'
import difflib
import re
import unittest
from google.protobuf import text_format
from google.protobuf.internal import test_util
from google.protobuf import unittest_pb2
from google.protobuf import unittest_mset_pb2
class TextFormatTest(unittest.TestCase):
def ReadGolden(self, golden_filename):
f = test_util.GoldenFile(golden_filename)
golden_lines = f.readlines()
f.close()
return golden_lines
def CompareToGoldenFile(self, text, golden_filename):
golden_lines = self.ReadGolden(golden_filename)
self.CompareToGoldenLines(text, golden_lines)
def CompareToGoldenText(self, text, golden_text):
self.CompareToGoldenLines(text, golden_text.splitlines(1))
def CompareToGoldenLines(self, text, golden_lines):
actual_lines = text.splitlines(1)
self.assertEqual(golden_lines, actual_lines,
"Text doesn't match golden. Diff:\n" +
''.join(difflib.ndiff(golden_lines, actual_lines)))
def testPrintAllFields(self):
message = unittest_pb2.TestAllTypes()
test_util.SetAllFields(message)
self.CompareToGoldenFile(
self.RemoveRedundantZeros(text_format.MessageToString(message)),
'text_format_unittest_data.txt')
def testPrintAllExtensions(self):
message = unittest_pb2.TestAllExtensions()
test_util.SetAllExtensions(message)
self.CompareToGoldenFile(
self.RemoveRedundantZeros(text_format.MessageToString(message)),
'text_format_unittest_extensions_data.txt')
def testPrintMessageSet(self):
message = unittest_mset_pb2.TestMessageSetContainer()
ext1 = unittest_mset_pb2.TestMessageSetExtension1.message_set_extension
ext2 = unittest_mset_pb2.TestMessageSetExtension2.message_set_extension
message.message_set.Extensions[ext1].i = 23
message.message_set.Extensions[ext2].str = 'foo'
self.CompareToGoldenText(text_format.MessageToString(message),
'message_set {\n'
' [protobuf_unittest.TestMessageSetExtension1] {\n'
' i: 23\n'
' }\n'
' [protobuf_unittest.TestMessageSetExtension2] {\n'
' str: \"foo\"\n'
' }\n'
'}\n')
def testPrintBadEnumValue(self):
message = unittest_pb2.TestAllTypes()
message.optional_nested_enum = 100
message.optional_foreign_enum = 101
message.optional_import_enum = 102
self.CompareToGoldenText(
text_format.MessageToString(message),
'optional_nested_enum: 100\n'
'optional_foreign_enum: 101\n'
'optional_import_enum: 102\n')
def testPrintBadEnumValueExtensions(self):
message = unittest_pb2.TestAllExtensions()
message.Extensions[unittest_pb2.optional_nested_enum_extension] = 100
message.Extensions[unittest_pb2.optional_foreign_enum_extension] = 101
message.Extensions[unittest_pb2.optional_import_enum_extension] = 102
self.CompareToGoldenText(
text_format.MessageToString(message),
'[protobuf_unittest.optional_nested_enum_extension]: 100\n'
'[protobuf_unittest.optional_foreign_enum_extension]: 101\n'
'[protobuf_unittest.optional_import_enum_extension]: 102\n')
def testPrintExotic(self):
message = unittest_pb2.TestAllTypes()
message.repeated_int64.append(-9223372036854775808)
message.repeated_uint64.append(18446744073709551615)
message.repeated_double.append(123.456)
message.repeated_double.append(1.23e22)
message.repeated_double.append(1.23e-18)
message.repeated_string.append('\000\001\a\b\f\n\r\t\v\\\'"')
message.repeated_string.append(u'\u00fc\ua71f')
self.CompareToGoldenText(
self.RemoveRedundantZeros(text_format.MessageToString(message)),
'repeated_int64: -9223372036854775808\n'
'repeated_uint64: 18446744073709551615\n'
'repeated_double: 123.456\n'
'repeated_double: 1.23e+22\n'
'repeated_double: 1.23e-18\n'
'repeated_string: '
'"\\000\\001\\007\\010\\014\\n\\r\\t\\013\\\\\\\'\\""\n'
'repeated_string: "\\303\\274\\352\\234\\237"\n')
def testPrintNestedMessageAsOneLine(self):
message = unittest_pb2.TestAllTypes()
msg = message.repeated_nested_message.add()
msg.bb = 42;
self.CompareToGoldenText(
text_format.MessageToString(message, as_one_line=True),
'repeated_nested_message { bb: 42 }')
def testPrintRepeatedFieldsAsOneLine(self):
message = unittest_pb2.TestAllTypes()
message.repeated_int32.append(1)
message.repeated_int32.append(1)
message.repeated_int32.append(3)
message.repeated_string.append("Google")
message.repeated_string.append("Zurich")
self.CompareToGoldenText(
text_format.MessageToString(message, as_one_line=True),
'repeated_int32: 1 repeated_int32: 1 repeated_int32: 3 '
'repeated_string: "Google" repeated_string: "Zurich"')
def testPrintNestedNewLineInStringAsOneLine(self):
message = unittest_pb2.TestAllTypes()
message.optional_string = "a\nnew\nline"
self.CompareToGoldenText(
text_format.MessageToString(message, as_one_line=True),
'optional_string: "a\\nnew\\nline"')
def testPrintMessageSetAsOneLine(self):
message = unittest_mset_pb2.TestMessageSetContainer()
ext1 = unittest_mset_pb2.TestMessageSetExtension1.message_set_extension
ext2 = unittest_mset_pb2.TestMessageSetExtension2.message_set_extension
message.message_set.Extensions[ext1].i = 23
message.message_set.Extensions[ext2].str = 'foo'
self.CompareToGoldenText(
text_format.MessageToString(message, as_one_line=True),
'message_set {'
' [protobuf_unittest.TestMessageSetExtension1] {'
' i: 23'
' }'
' [protobuf_unittest.TestMessageSetExtension2] {'
' str: \"foo\"'
' }'
' }')
def testPrintExoticAsOneLine(self):
message = unittest_pb2.TestAllTypes()
message.repeated_int64.append(-9223372036854775808)
message.repeated_uint64.append(18446744073709551615)
message.repeated_double.append(123.456)
message.repeated_double.append(1.23e22)
message.repeated_double.append(1.23e-18)
message.repeated_string.append('\000\001\a\b\f\n\r\t\v\\\'"')
message.repeated_string.append(u'\u00fc\ua71f')
self.CompareToGoldenText(
self.RemoveRedundantZeros(
text_format.MessageToString(message, as_one_line=True)),
'repeated_int64: -9223372036854775808'
' repeated_uint64: 18446744073709551615'
' repeated_double: 123.456'
' repeated_double: 1.23e+22'
' repeated_double: 1.23e-18'
' repeated_string: '
'"\\000\\001\\007\\010\\014\\n\\r\\t\\013\\\\\\\'\\""'
' repeated_string: "\\303\\274\\352\\234\\237"')
def testRoundTripExoticAsOneLine(self):
message = unittest_pb2.TestAllTypes()
message.repeated_int64.append(-9223372036854775808)
message.repeated_uint64.append(18446744073709551615)
message.repeated_double.append(123.456)
message.repeated_double.append(1.23e22)
message.repeated_double.append(1.23e-18)
message.repeated_string.append('\000\001\a\b\f\n\r\t\v\\\'"')
message.repeated_string.append(u'\u00fc\ua71f')
# Test as_utf8 = False.
wire_text = text_format.MessageToString(
message, as_one_line=True, as_utf8=False)
parsed_message = unittest_pb2.TestAllTypes()
text_format.Merge(wire_text, parsed_message)
self.assertEquals(message, parsed_message)
# Test as_utf8 = True.
wire_text = text_format.MessageToString(
message, as_one_line=True, as_utf8=True)
parsed_message = unittest_pb2.TestAllTypes()
text_format.Merge(wire_text, parsed_message)
self.assertEquals(message, parsed_message)
def testPrintRawUtf8String(self):
message = unittest_pb2.TestAllTypes()
message.repeated_string.append(u'\u00fc\ua71f')
text = text_format.MessageToString(message, as_utf8 = True)
self.CompareToGoldenText(text, 'repeated_string: "\303\274\352\234\237"\n')
parsed_message = unittest_pb2.TestAllTypes()
text_format.Merge(text, parsed_message)
self.assertEquals(message, parsed_message)
def testMessageToString(self):
message = unittest_pb2.ForeignMessage()
message.c = 123
self.assertEqual('c: 123\n', str(message))
def RemoveRedundantZeros(self, text):
# Some platforms print 1e+5 as 1e+005. This is fine, but we need to remove
# these zeros in order to match the golden file.
text = text.replace('e+0','e+').replace('e+0','e+') \
.replace('e-0','e-').replace('e-0','e-')
# Floating point fields are printed with .0 suffix even if they are
    # actually integer numbers.
text = re.compile('\.0$', re.MULTILINE).sub('', text)
return text
def testMergeGolden(self):
golden_text = '\n'.join(self.ReadGolden('text_format_unittest_data.txt'))
parsed_message = unittest_pb2.TestAllTypes()
text_format.Merge(golden_text, parsed_message)
message = unittest_pb2.TestAllTypes()
test_util.SetAllFields(message)
self.assertEquals(message, parsed_message)
def testMergeGoldenExtensions(self):
golden_text = '\n'.join(self.ReadGolden(
'text_format_unittest_extensions_data.txt'))
parsed_message = unittest_pb2.TestAllExtensions()
text_format.Merge(golden_text, parsed_message)
message = unittest_pb2.TestAllExtensions()
test_util.SetAllExtensions(message)
self.assertEquals(message, parsed_message)
def testMergeAllFields(self):
message = unittest_pb2.TestAllTypes()
test_util.SetAllFields(message)
ascii_text = text_format.MessageToString(message)
parsed_message = unittest_pb2.TestAllTypes()
text_format.Merge(ascii_text, parsed_message)
self.assertEqual(message, parsed_message)
test_util.ExpectAllFieldsSet(self, message)
def testMergeAllExtensions(self):
message = unittest_pb2.TestAllExtensions()
test_util.SetAllExtensions(message)
ascii_text = text_format.MessageToString(message)
parsed_message = unittest_pb2.TestAllExtensions()
text_format.Merge(ascii_text, parsed_message)
self.assertEqual(message, parsed_message)
def testMergeMessageSet(self):
message = unittest_pb2.TestAllTypes()
text = ('repeated_uint64: 1\n'
'repeated_uint64: 2\n')
text_format.Merge(text, message)
self.assertEqual(1, message.repeated_uint64[0])
self.assertEqual(2, message.repeated_uint64[1])
message = unittest_mset_pb2.TestMessageSetContainer()
text = ('message_set {\n'
' [protobuf_unittest.TestMessageSetExtension1] {\n'
' i: 23\n'
' }\n'
' [protobuf_unittest.TestMessageSetExtension2] {\n'
' str: \"foo\"\n'
' }\n'
'}\n')
text_format.Merge(text, message)
ext1 = unittest_mset_pb2.TestMessageSetExtension1.message_set_extension
ext2 = unittest_mset_pb2.TestMessageSetExtension2.message_set_extension
self.assertEquals(23, message.message_set.Extensions[ext1].i)
self.assertEquals('foo', message.message_set.Extensions[ext2].str)
def testMergeExotic(self):
message = unittest_pb2.TestAllTypes()
text = ('repeated_int64: -9223372036854775808\n'
'repeated_uint64: 18446744073709551615\n'
'repeated_double: 123.456\n'
'repeated_double: 1.23e+22\n'
'repeated_double: 1.23e-18\n'
'repeated_string: \n'
'"\\000\\001\\007\\010\\014\\n\\r\\t\\013\\\\\\\'\\""\n'
'repeated_string: "foo" \'corge\' "grault"\n'
'repeated_string: "\\303\\274\\352\\234\\237"\n'
'repeated_string: "\\xc3\\xbc"\n'
'repeated_string: "\xc3\xbc"\n')
text_format.Merge(text, message)
self.assertEqual(-9223372036854775808, message.repeated_int64[0])
self.assertEqual(18446744073709551615, message.repeated_uint64[0])
self.assertEqual(123.456, message.repeated_double[0])
self.assertEqual(1.23e22, message.repeated_double[1])
self.assertEqual(1.23e-18, message.repeated_double[2])
self.assertEqual(
'\000\001\a\b\f\n\r\t\v\\\'"', message.repeated_string[0])
self.assertEqual('foocorgegrault', message.repeated_string[1])
self.assertEqual(u'\u00fc\ua71f', message.repeated_string[2])
self.assertEqual(u'\u00fc', message.repeated_string[3])
def testMergeEmptyText(self):
message = unittest_pb2.TestAllTypes()
text = ''
text_format.Merge(text, message)
self.assertEquals(unittest_pb2.TestAllTypes(), message)
def testMergeInvalidUtf8(self):
message = unittest_pb2.TestAllTypes()
text = 'repeated_string: "\\xc3\\xc3"'
self.assertRaises(text_format.ParseError, text_format.Merge, text, message)
def testMergeSingleWord(self):
message = unittest_pb2.TestAllTypes()
text = 'foo'
self.assertRaisesWithMessage(
text_format.ParseError,
('1:1 : Message type "protobuf_unittest.TestAllTypes" has no field named '
'"foo".'),
text_format.Merge, text, message)
def testMergeUnknownField(self):
message = unittest_pb2.TestAllTypes()
text = 'unknown_field: 8\n'
self.assertRaisesWithMessage(
text_format.ParseError,
('1:1 : Message type "protobuf_unittest.TestAllTypes" has no field named '
'"unknown_field".'),
text_format.Merge, text, message)
def testMergeBadExtension(self):
message = unittest_pb2.TestAllExtensions()
text = '[unknown_extension]: 8\n'
self.assertRaisesWithMessage(
text_format.ParseError,
'1:2 : Extension "unknown_extension" not registered.',
text_format.Merge, text, message)
message = unittest_pb2.TestAllTypes()
self.assertRaisesWithMessage(
text_format.ParseError,
('1:2 : Message type "protobuf_unittest.TestAllTypes" does not have '
'extensions.'),
text_format.Merge, text, message)
def testMergeGroupNotClosed(self):
message = unittest_pb2.TestAllTypes()
text = 'RepeatedGroup: <'
self.assertRaisesWithMessage(
text_format.ParseError, '1:16 : Expected ">".',
text_format.Merge, text, message)
text = 'RepeatedGroup: {'
self.assertRaisesWithMessage(
text_format.ParseError, '1:16 : Expected "}".',
text_format.Merge, text, message)
def testMergeEmptyGroup(self):
message = unittest_pb2.TestAllTypes()
text = 'OptionalGroup: {}'
text_format.Merge(text, message)
self.assertTrue(message.HasField('optionalgroup'))
message.Clear()
message = unittest_pb2.TestAllTypes()
text = 'OptionalGroup: <>'
text_format.Merge(text, message)
self.assertTrue(message.HasField('optionalgroup'))
def testMergeBadEnumValue(self):
message = unittest_pb2.TestAllTypes()
text = 'optional_nested_enum: BARR'
self.assertRaisesWithMessage(
text_format.ParseError,
('1:23 : Enum type "protobuf_unittest.TestAllTypes.NestedEnum" '
'has no value named BARR.'),
text_format.Merge, text, message)
message = unittest_pb2.TestAllTypes()
text = 'optional_nested_enum: 100'
self.assertRaisesWithMessage(
text_format.ParseError,
('1:23 : Enum type "protobuf_unittest.TestAllTypes.NestedEnum" '
'has no value with number 100.'),
text_format.Merge, text, message)
def testMergeBadIntValue(self):
message = unittest_pb2.TestAllTypes()
text = 'optional_int32: bork'
self.assertRaisesWithMessage(
text_format.ParseError,
('1:17 : Couldn\'t parse integer: bork'),
text_format.Merge, text, message)
def assertRaisesWithMessage(self, e_class, e, func, *args, **kwargs):
"""Same as assertRaises, but also compares the exception message."""
if hasattr(e_class, '__name__'):
exc_name = e_class.__name__
else:
exc_name = str(e_class)
try:
func(*args, **kwargs)
except e_class as expr:
if str(expr) != e:
msg = '%s raised, but with wrong message: "%s" instead of "%s"'
raise self.failureException(msg % (exc_name,
str(expr).encode('string_escape'),
e.encode('string_escape')))
return
else:
raise self.failureException('%s not raised' % exc_name)
class TokenizerTest(unittest.TestCase):
def testSimpleTokenCases(self):
text = ('identifier1:"string1"\n \n\n'
'identifier2 : \n \n123 \n identifier3 :\'string\'\n'
'identifiER_4 : 1.1e+2 ID5:-0.23 ID6:\'aaaa\\\'bbbb\'\n'
'ID7 : "aa\\"bb"\n\n\n\n ID8: {A:inf B:-inf C:true D:false}\n'
'ID9: 22 ID10: -111111111111111111 ID11: -22\n'
'ID12: 2222222222222222222 ID13: 1.23456f ID14: 1.2e+2f '
'false_bool: 0 true_BOOL:t \n true_bool1: 1 false_BOOL1:f ' )
tokenizer = text_format._Tokenizer(text)
methods = [(tokenizer.ConsumeIdentifier, 'identifier1'),
':',
(tokenizer.ConsumeString, 'string1'),
(tokenizer.ConsumeIdentifier, 'identifier2'),
':',
(tokenizer.ConsumeInt32, 123),
(tokenizer.ConsumeIdentifier, 'identifier3'),
':',
(tokenizer.ConsumeString, 'string'),
(tokenizer.ConsumeIdentifier, 'identifiER_4'),
':',
(tokenizer.ConsumeFloat, 1.1e+2),
(tokenizer.ConsumeIdentifier, 'ID5'),
':',
(tokenizer.ConsumeFloat, -0.23),
(tokenizer.ConsumeIdentifier, 'ID6'),
':',
(tokenizer.ConsumeString, 'aaaa\'bbbb'),
(tokenizer.ConsumeIdentifier, 'ID7'),
':',
(tokenizer.ConsumeString, 'aa\"bb'),
(tokenizer.ConsumeIdentifier, 'ID8'),
':',
'{',
(tokenizer.ConsumeIdentifier, 'A'),
':',
(tokenizer.ConsumeFloat, float('inf')),
(tokenizer.ConsumeIdentifier, 'B'),
':',
(tokenizer.ConsumeFloat, -float('inf')),
(tokenizer.ConsumeIdentifier, 'C'),
':',
(tokenizer.ConsumeBool, True),
(tokenizer.ConsumeIdentifier, 'D'),
':',
(tokenizer.ConsumeBool, False),
'}',
(tokenizer.ConsumeIdentifier, 'ID9'),
':',
(tokenizer.ConsumeUint32, 22),
(tokenizer.ConsumeIdentifier, 'ID10'),
':',
(tokenizer.ConsumeInt64, -111111111111111111),
(tokenizer.ConsumeIdentifier, 'ID11'),
':',
(tokenizer.ConsumeInt32, -22),
(tokenizer.ConsumeIdentifier, 'ID12'),
':',
(tokenizer.ConsumeUint64, 2222222222222222222),
(tokenizer.ConsumeIdentifier, 'ID13'),
':',
(tokenizer.ConsumeFloat, 1.23456),
(tokenizer.ConsumeIdentifier, 'ID14'),
':',
(tokenizer.ConsumeFloat, 1.2e+2),
(tokenizer.ConsumeIdentifier, 'false_bool'),
':',
(tokenizer.ConsumeBool, False),
(tokenizer.ConsumeIdentifier, 'true_BOOL'),
':',
(tokenizer.ConsumeBool, True),
(tokenizer.ConsumeIdentifier, 'true_bool1'),
':',
(tokenizer.ConsumeBool, True),
(tokenizer.ConsumeIdentifier, 'false_BOOL1'),
':',
(tokenizer.ConsumeBool, False)]
i = 0
while not tokenizer.AtEnd():
m = methods[i]
if type(m) == str:
token = tokenizer.token
self.assertEqual(token, m)
tokenizer.NextToken()
else:
self.assertEqual(m[1], m[0]())
i += 1
def testConsumeIntegers(self):
# This test only tests the failures in the integer parsing methods as well
# as the '0' special cases.
int64_max = (1 << 63) - 1
uint32_max = (1 << 32) - 1
text = '-1 %d %d' % (uint32_max + 1, int64_max + 1)
tokenizer = text_format._Tokenizer(text)
self.assertRaises(text_format.ParseError, tokenizer.ConsumeUint32)
self.assertRaises(text_format.ParseError, tokenizer.ConsumeUint64)
self.assertEqual(-1, tokenizer.ConsumeInt32())
self.assertRaises(text_format.ParseError, tokenizer.ConsumeUint32)
self.assertRaises(text_format.ParseError, tokenizer.ConsumeInt32)
self.assertEqual(uint32_max + 1, tokenizer.ConsumeInt64())
self.assertRaises(text_format.ParseError, tokenizer.ConsumeInt64)
self.assertEqual(int64_max + 1, tokenizer.ConsumeUint64())
self.assertTrue(tokenizer.AtEnd())
text = '-0 -0 0 0'
tokenizer = text_format._Tokenizer(text)
self.assertEqual(0, tokenizer.ConsumeUint32())
self.assertEqual(0, tokenizer.ConsumeUint64())
self.assertEqual(0, tokenizer.ConsumeUint32())
self.assertEqual(0, tokenizer.ConsumeUint64())
self.assertTrue(tokenizer.AtEnd())
def testConsumeByteString(self):
text = '"string1\''
tokenizer = text_format._Tokenizer(text)
self.assertRaises(text_format.ParseError, tokenizer.ConsumeByteString)
text = 'string1"'
tokenizer = text_format._Tokenizer(text)
self.assertRaises(text_format.ParseError, tokenizer.ConsumeByteString)
text = '\n"\\xt"'
tokenizer = text_format._Tokenizer(text)
self.assertRaises(text_format.ParseError, tokenizer.ConsumeByteString)
text = '\n"\\"'
tokenizer = text_format._Tokenizer(text)
self.assertRaises(text_format.ParseError, tokenizer.ConsumeByteString)
text = '\n"\\x"'
tokenizer = text_format._Tokenizer(text)
self.assertRaises(text_format.ParseError, tokenizer.ConsumeByteString)
def testConsumeBool(self):
text = 'not-a-bool'
tokenizer = text_format._Tokenizer(text)
self.assertRaises(text_format.ParseError, tokenizer.ConsumeBool)
if __name__ == '__main__':
unittest.main()
|
bsd-3-clause
|
ItsAdventureTime/fail2ban
|
fail2ban/server/faildata.py
|
5
|
1896
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
# vi: set ft=python sts=4 ts=4 sw=4 noet :
# This file is part of Fail2Ban.
#
# Fail2Ban is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Fail2Ban is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Fail2Ban; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
# Author: Cyril Jaquier
#
__author__ = "Cyril Jaquier"
__copyright__ = "Copyright (c) 2004 Cyril Jaquier"
__license__ = "GPL"
from ..helpers import getLogger
# Gets the instance of the logger.
logSys = getLogger(__name__)
class FailData:
def __init__(self):
self.__retry = 0
self.__lastTime = 0
self.__lastReset = 0
self.__matches = []
def setRetry(self, value):
self.__retry = value
# keep only the last matches or reset entirely
# Explicit if/else for compatibility with Python 2.4
if value:
			self.__matches = self.__matches[-min(len(self.__matches), value):]
else:
self.__matches = []
def getRetry(self):
return self.__retry
def getMatches(self):
return self.__matches
def inc(self, matches=None):
self.__retry += 1
self.__matches += matches or []
def setLastTime(self, value):
if value > self.__lastTime:
self.__lastTime = value
def getLastTime(self):
return self.__lastTime
def getLastReset(self):
return self.__lastReset
def setLastReset(self, value):
self.__lastReset = value
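# Illustrative sketch (not part of Fail2Ban): how FailData accumulates failures
# and how setRetry() trims the stored matches to at most the new retry count.
# This block is a hypothetical manual check and is never executed by Fail2Ban.
if __name__ == "__main__":
	data = FailData()
	data.inc(matches=["match 1"])
	data.inc(matches=["match 2"])
	data.inc(matches=["match 3"])
	assert data.getRetry() == 3 and len(data.getMatches()) == 3
	data.setRetry(2)	# keep only the two most recent matches
	assert data.getMatches() == ["match 2", "match 3"]
	data.setRetry(0)	# reset entirely
	assert data.getMatches() == []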
|
gpl-2.0
|
felliott/scrapi
|
tasks.py
|
1
|
8712
|
import os
import base64
import logging
import platform
from datetime import date, timedelta
from invoke import run, task
# from elasticsearch import helpers
# from dateutil.parser import parse
# from six.moves.urllib import parse as urllib_parse
# import scrapi.harvesters # noqa
# from scrapi import linter
# from scrapi import registry
# from scrapi import settings
logger = logging.getLogger()
WHEELHOUSE_PATH = os.environ.get('WHEELHOUSE')
@task
def reindex(src, dest):
from elasticsearch import helpers
from scrapi.processing.elasticsearch import DatabaseManager
dm = DatabaseManager()
dm.setup()
helpers.reindex(dm.es, src, dest)
dm.es.indices.delete(src)
@task
def alias(alias, index):
from scrapi.processing.elasticsearch import DatabaseManager
dm = DatabaseManager()
dm.setup()
dm.es.indices.delete_alias(index=alias, name='_all', ignore=404)
dm.es.indices.put_alias(alias, index)
@task
def migrate(migration, sources=None, kwargs_string=None, dry=True, async=False, group_size=1000):
''' Task to run a migration.
:param migration: The migration function to run. This is passed in
as a string then interpreted as a function by the invoke task.
:type migration: str
:param kwargs_string: parsed into an optional set of keyword
arguments, so that the invoke migrate task can accept a variable
number of arguments for each migration.
The kwargs_string should be in the following format:
'key:value, key2:value2'
    ...with the keys and values separated by colons, and each kwarg separated
    by commas.
    :type kwargs_string: str
    An example of usage, renaming mit to mit2 as a real run, would be:
inv migrate rename -s mit -k 'target:mit2' --no-dry
An example of calling renormalize on two sources as an async dry run:
inv migrate renormalize -s 'mit,asu' -a
'''
kwargs_string = kwargs_string or ':'
sources = sources or ''
from scrapi import migrations
from scrapi.tasks import migrate
kwargs = {}
for key, val in map(lambda x: x.split(':'), kwargs_string.split(',')):
key, val = key.strip(), val.strip()
if key not in kwargs.keys():
kwargs[key] = val
elif isinstance(kwargs[key], list):
kwargs[key].append(val)
else:
kwargs[key] = [kwargs[key], val]
kwargs['dry'] = dry
kwargs['async'] = async
kwargs['group_size'] = group_size
kwargs['sources'] = list(map(lambda x: x.strip(), sources.split(',')))
if kwargs['sources'] == ['']:
kwargs.pop('sources')
migrate_func = migrations.__dict__[migration]
migrate(migrate_func, **kwargs)
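# Illustrative sketch (not part of scrapi): how a kwargs_string in the
# 'key:value, key2:value2' format described above is turned into a dict by the
# loop inside migrate(). The helper name and the sample values are hypothetical;
# repeated keys collect into a list, e.g.
#   parse_kwargs_string('target:mit2, source:mit, source:asu')
#   -> {'target': 'mit2', 'source': ['mit', 'asu']}
def parse_kwargs_string(kwargs_string):
    kwargs = {}
    for key, val in map(lambda x: x.split(':'), kwargs_string.split(',')):
        key, val = key.strip(), val.strip()
        if key not in kwargs:
            kwargs[key] = val
        elif isinstance(kwargs[key], list):
            kwargs[key].append(val)
        else:
            kwargs[key] = [kwargs[key], val]
    return kwargs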
@task
def migrate_to_source_partition(dry=True, async=False):
from scrapi.tasks import migrate_to_source_partition
migrate_to_source_partition(dry=dry, async=async)
@task
def reset_search():
run("curl -XPOST 'http://localhost:9200/_shutdown'")
if platform.linux_distribution()[0] == 'Ubuntu':
run("sudo service elasticsearch restart")
elif platform.system() == 'Darwin': # Mac OSX
run('elasticsearch')
@task
def elasticsearch():
'''Start a local elasticsearch server
NOTE: Requires that elasticsearch is installed. See README for instructions
'''
if platform.linux_distribution()[0] == 'Ubuntu':
run("sudo service elasticsearch restart")
elif platform.system() == 'Darwin': # Mac OSX
run('elasticsearch')
else:
print(
"Your system is not recognized, you will have to start elasticsearch manually")
@task
def test(cov=True, doctests=True, verbose=False, debug=False, pdb=False):
"""
Runs all tests in the 'tests/' directory
"""
cmd = 'py.test scrapi tests'
if doctests:
cmd += ' --doctest-modules'
if verbose:
cmd += ' -v'
if debug:
cmd += ' -s'
if cov:
cmd += ' --cov-report term-missing --cov-config .coveragerc --cov scrapi --cov api'
if pdb:
cmd += ' --pdb'
run(cmd, pty=True)
@task
def wheelhouse(develop=False):
req_file = 'dev-requirements.txt' if develop else 'requirements.txt'
cmd = 'pip wheel --find-links={} -r {} --wheel-dir={}'.format(WHEELHOUSE_PATH, req_file, WHEELHOUSE_PATH)
run(cmd, pty=True)
@task
def requirements(develop=False, upgrade=False):
req_file = 'dev-requirements.txt' if develop else 'requirements.txt'
cmd = 'pip install -r {}'.format(req_file)
if upgrade:
cmd += ' --upgrade'
if WHEELHOUSE_PATH:
cmd += ' --no-index --find-links={}'.format(WHEELHOUSE_PATH)
run(cmd, pty=True)
@task
def beat(setup=True):
from scrapi import registry
from scrapi.tasks import app
# Set up the provider map for elasticsearch
if setup:
provider_map(delete=True)
app.conf['CELERYBEAT_SCHEDULE'] = registry.beat_schedule
app.Beat().run()
@task
def worker(loglevel='INFO', hostname='%h', autoreload=False):
from scrapi.tasks import app
command = ['worker']
if loglevel:
command.extend(['--loglevel', loglevel])
if hostname:
command.extend(['--hostname', hostname])
if autoreload:
command.extend(['--autoreload'])
app.worker_main(command)
@task
def harvester(harvester_name, async=False, start=None, end=None):
from scrapi import settings
settings.CELERY_ALWAYS_EAGER = not async
from scrapi import registry
from scrapi.tasks import run_harvester
from dateutil.parser import parse
if not registry.get(harvester_name):
        raise ValueError('No such harvester {}'.format(harvester_name))
end = parse(end).date() if end else date.today()
start = parse(start).date() if start else end - timedelta(settings.DAYS_BACK)
run_harvester.delay(harvester_name, start_date=start, end_date=end)
@task
def harvesters(async=False, start=None, end=None):
from scrapi import settings
settings.CELERY_ALWAYS_EAGER = not async
from scrapi import registry
from scrapi.tasks import run_harvester
from dateutil.parser import parse
start = parse(start).date() if start else date.today() - timedelta(settings.DAYS_BACK)
end = parse(end).date() if end else date.today()
exceptions = []
for harvester_name in registry.keys():
try:
run_harvester.delay(harvester_name, start_date=start, end_date=end)
except Exception as e:
logger.exception(e)
exceptions.append(e)
logger.info("\n\nNumber of exceptions: {}".format(len(exceptions)))
for exception in exceptions:
        logger.exception(exception)
@task
def lint_all():
from scrapi import registry
for name in registry.keys():
lint(name)
@task
def lint(name):
from scrapi import linter
from scrapi import registry
harvester = registry[name]
try:
linter.lint(harvester.harvest, harvester.normalize)
except Exception as e:
        print('Harvester {} raised the following exception'.format(harvester.short_name))
print(e)
@task
def provider_map(delete=False):
from six.moves.urllib import parse as urllib_parse
from scrapi import registry
from scrapi.processing.elasticsearch import DatabaseManager
dm = DatabaseManager()
dm.setup()
es = dm.es
if delete:
es.indices.delete(index='share_providers', ignore=[404])
for harvester_name, harvester in registry.items():
with open("img/favicons/{}_favicon.ico".format(harvester.short_name), "rb") as f:
favicon = urllib_parse.quote(base64.encodestring(f.read()))
es.index(
'share_providers',
harvester.short_name,
body={
'favicon': 'data:image/png;base64,' + favicon,
'short_name': harvester.short_name,
'long_name': harvester.long_name,
'url': harvester.url
},
id=harvester.short_name,
refresh=True
)
print(es.count('share_providers', body={'query': {'match_all': {}}})['count'])
@task
def apiserver():
os.system('python manage.py runserver')
@task
def apidb():
os.system('python manage.py migrate')
@task
def reset_all():
import sys
from scrapi import settings
if sys.version[0] == "3":
raw_input = input
if raw_input('Are you sure? y/N ') != 'y':
return
os.system('psql -c "DROP DATABASE scrapi;"')
os.system('psql -c "CREATE DATABASE scrapi;"')
os.system('python manage.py migrate')
os.system("curl -XDELETE '{}/share*'".format(settings.ELASTIC_URI))
os.system("invoke alias share share_v2")
os.system("invoke provider_map")
|
apache-2.0
|
2014cdag7/2014cdag7
|
wsgi/static/Brython2.1.0-20140419-113919/Lib/unittest/test/test_suite.py
|
791
|
12066
|
import unittest
import sys
from .support import LoggingResult, TestEquality
### Support code for Test_TestSuite
################################################################
class Test(object):
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def test_3(self): pass
def runTest(self): pass
def _mk_TestSuite(*names):
return unittest.TestSuite(Test.Foo(n) for n in names)
################################################################
class Test_TestSuite(unittest.TestCase, TestEquality):
### Set up attributes needed by inherited tests
################################################################
# Used by TestEquality.test_eq
eq_pairs = [(unittest.TestSuite(), unittest.TestSuite())
,(unittest.TestSuite(), unittest.TestSuite([]))
,(_mk_TestSuite('test_1'), _mk_TestSuite('test_1'))]
# Used by TestEquality.test_ne
ne_pairs = [(unittest.TestSuite(), _mk_TestSuite('test_1'))
,(unittest.TestSuite([]), _mk_TestSuite('test_1'))
,(_mk_TestSuite('test_1', 'test_2'), _mk_TestSuite('test_1', 'test_3'))
,(_mk_TestSuite('test_1'), _mk_TestSuite('test_2'))]
################################################################
### /Set up attributes needed by inherited tests
### Tests for TestSuite.__init__
################################################################
# "class TestSuite([tests])"
#
# The tests iterable should be optional
def test_init__tests_optional(self):
suite = unittest.TestSuite()
self.assertEqual(suite.countTestCases(), 0)
# "class TestSuite([tests])"
# ...
# "If tests is given, it must be an iterable of individual test cases
# or other test suites that will be used to build the suite initially"
#
# TestSuite should deal with empty tests iterables by allowing the
# creation of an empty suite
def test_init__empty_tests(self):
suite = unittest.TestSuite([])
self.assertEqual(suite.countTestCases(), 0)
# "class TestSuite([tests])"
# ...
# "If tests is given, it must be an iterable of individual test cases
# or other test suites that will be used to build the suite initially"
#
# TestSuite should allow any iterable to provide tests
def test_init__tests_from_any_iterable(self):
def tests():
yield unittest.FunctionTestCase(lambda: None)
yield unittest.FunctionTestCase(lambda: None)
suite_1 = unittest.TestSuite(tests())
self.assertEqual(suite_1.countTestCases(), 2)
suite_2 = unittest.TestSuite(suite_1)
self.assertEqual(suite_2.countTestCases(), 2)
suite_3 = unittest.TestSuite(set(suite_1))
self.assertEqual(suite_3.countTestCases(), 2)
# "class TestSuite([tests])"
# ...
# "If tests is given, it must be an iterable of individual test cases
# or other test suites that will be used to build the suite initially"
#
# Does TestSuite() also allow other TestSuite() instances to be present
# in the tests iterable?
def test_init__TestSuite_instances_in_tests(self):
def tests():
ftc = unittest.FunctionTestCase(lambda: None)
yield unittest.TestSuite([ftc])
yield unittest.FunctionTestCase(lambda: None)
suite = unittest.TestSuite(tests())
self.assertEqual(suite.countTestCases(), 2)
################################################################
### /Tests for TestSuite.__init__
# Container types should support the iter protocol
def test_iter(self):
test1 = unittest.FunctionTestCase(lambda: None)
test2 = unittest.FunctionTestCase(lambda: None)
suite = unittest.TestSuite((test1, test2))
self.assertEqual(list(suite), [test1, test2])
# "Return the number of tests represented by the this test object.
# ...this method is also implemented by the TestSuite class, which can
# return larger [greater than 1] values"
#
# Presumably an empty TestSuite returns 0?
def test_countTestCases_zero_simple(self):
suite = unittest.TestSuite()
self.assertEqual(suite.countTestCases(), 0)
# "Return the number of tests represented by the this test object.
# ...this method is also implemented by the TestSuite class, which can
# return larger [greater than 1] values"
#
# Presumably an empty TestSuite (even if it contains other empty
# TestSuite instances) returns 0?
def test_countTestCases_zero_nested(self):
class Test1(unittest.TestCase):
def test(self):
pass
suite = unittest.TestSuite([unittest.TestSuite()])
self.assertEqual(suite.countTestCases(), 0)
# "Return the number of tests represented by the this test object.
# ...this method is also implemented by the TestSuite class, which can
# return larger [greater than 1] values"
def test_countTestCases_simple(self):
test1 = unittest.FunctionTestCase(lambda: None)
test2 = unittest.FunctionTestCase(lambda: None)
suite = unittest.TestSuite((test1, test2))
self.assertEqual(suite.countTestCases(), 2)
# "Return the number of tests represented by the this test object.
# ...this method is also implemented by the TestSuite class, which can
# return larger [greater than 1] values"
#
# Make sure this holds for nested TestSuite instances, too
def test_countTestCases_nested(self):
class Test1(unittest.TestCase):
def test1(self): pass
def test2(self): pass
test2 = unittest.FunctionTestCase(lambda: None)
test3 = unittest.FunctionTestCase(lambda: None)
child = unittest.TestSuite((Test1('test2'), test2))
parent = unittest.TestSuite((test3, child, Test1('test1')))
self.assertEqual(parent.countTestCases(), 4)
# "Run the tests associated with this suite, collecting the result into
# the test result object passed as result."
#
# And if there are no tests? What then?
def test_run__empty_suite(self):
events = []
result = LoggingResult(events)
suite = unittest.TestSuite()
suite.run(result)
self.assertEqual(events, [])
# "Note that unlike TestCase.run(), TestSuite.run() requires the
# "result object to be passed in."
def test_run__requires_result(self):
suite = unittest.TestSuite()
try:
suite.run()
except TypeError:
pass
else:
self.fail("Failed to raise TypeError")
# "Run the tests associated with this suite, collecting the result into
# the test result object passed as result."
def test_run(self):
events = []
result = LoggingResult(events)
class LoggingCase(unittest.TestCase):
def run(self, result):
events.append('run %s' % self._testMethodName)
def test1(self): pass
def test2(self): pass
tests = [LoggingCase('test1'), LoggingCase('test2')]
unittest.TestSuite(tests).run(result)
self.assertEqual(events, ['run test1', 'run test2'])
# "Add a TestCase ... to the suite"
def test_addTest__TestCase(self):
class Foo(unittest.TestCase):
def test(self): pass
test = Foo('test')
suite = unittest.TestSuite()
suite.addTest(test)
self.assertEqual(suite.countTestCases(), 1)
self.assertEqual(list(suite), [test])
# "Add a ... TestSuite to the suite"
def test_addTest__TestSuite(self):
class Foo(unittest.TestCase):
def test(self): pass
suite_2 = unittest.TestSuite([Foo('test')])
suite = unittest.TestSuite()
suite.addTest(suite_2)
self.assertEqual(suite.countTestCases(), 1)
self.assertEqual(list(suite), [suite_2])
# "Add all the tests from an iterable of TestCase and TestSuite
# instances to this test suite."
#
# "This is equivalent to iterating over tests, calling addTest() for
# each element"
def test_addTests(self):
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
test_1 = Foo('test_1')
test_2 = Foo('test_2')
inner_suite = unittest.TestSuite([test_2])
def gen():
yield test_1
yield test_2
yield inner_suite
suite_1 = unittest.TestSuite()
suite_1.addTests(gen())
self.assertEqual(list(suite_1), list(gen()))
# "This is equivalent to iterating over tests, calling addTest() for
# each element"
suite_2 = unittest.TestSuite()
for t in gen():
suite_2.addTest(t)
self.assertEqual(suite_1, suite_2)
# "Add all the tests from an iterable of TestCase and TestSuite
# instances to this test suite."
#
# What happens if it doesn't get an iterable?
def test_addTest__noniterable(self):
suite = unittest.TestSuite()
try:
suite.addTests(5)
except TypeError:
pass
else:
self.fail("Failed to raise TypeError")
def test_addTest__noncallable(self):
suite = unittest.TestSuite()
self.assertRaises(TypeError, suite.addTest, 5)
def test_addTest__casesuiteclass(self):
suite = unittest.TestSuite()
self.assertRaises(TypeError, suite.addTest, Test_TestSuite)
self.assertRaises(TypeError, suite.addTest, unittest.TestSuite)
def test_addTests__string(self):
suite = unittest.TestSuite()
self.assertRaises(TypeError, suite.addTests, "foo")
def test_function_in_suite(self):
def f(_):
pass
suite = unittest.TestSuite()
suite.addTest(f)
# when the bug is fixed this line will not crash
suite.run(unittest.TestResult())
def test_basetestsuite(self):
class Test(unittest.TestCase):
wasSetUp = False
wasTornDown = False
@classmethod
def setUpClass(cls):
cls.wasSetUp = True
@classmethod
def tearDownClass(cls):
cls.wasTornDown = True
def testPass(self):
pass
def testFail(self):
fail
class Module(object):
wasSetUp = False
wasTornDown = False
@staticmethod
def setUpModule():
Module.wasSetUp = True
@staticmethod
def tearDownModule():
Module.wasTornDown = True
Test.__module__ = 'Module'
sys.modules['Module'] = Module
self.addCleanup(sys.modules.pop, 'Module')
suite = unittest.BaseTestSuite()
suite.addTests([Test('testPass'), Test('testFail')])
self.assertEqual(suite.countTestCases(), 2)
result = unittest.TestResult()
suite.run(result)
self.assertFalse(Module.wasSetUp)
self.assertFalse(Module.wasTornDown)
self.assertFalse(Test.wasSetUp)
self.assertFalse(Test.wasTornDown)
self.assertEqual(len(result.errors), 1)
self.assertEqual(len(result.failures), 0)
self.assertEqual(result.testsRun, 2)
def test_overriding_call(self):
class MySuite(unittest.TestSuite):
called = False
def __call__(self, *args, **kw):
self.called = True
unittest.TestSuite.__call__(self, *args, **kw)
suite = MySuite()
result = unittest.TestResult()
wrapper = unittest.TestSuite()
wrapper.addTest(suite)
wrapper(result)
self.assertTrue(suite.called)
# reusing results should be permitted even if abominable
self.assertFalse(result._testRunEntered)
if __name__ == '__main__':
unittest.main()
|
gpl-2.0
|
KrzysztofStachanczyk/Sensors-WWW-website
|
www/env/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/__init__.py
|
1730
|
3405
|
"""A collection of modules for building different kinds of tree from
HTML documents.
To create a treebuilder for a new type of tree, you need to
implement several things:
1) A set of classes for various types of elements: Document, Doctype,
Comment, Element. These must implement the interface of
_base.treebuilders.Node (although comment nodes have a different
signature for their constructor, see treebuilders.etree.Comment)
Textual content may also be implemented as another node type, or not, as
your tree implementation requires.
2) A treebuilder object (called TreeBuilder by convention) that
inherits from treebuilders._base.TreeBuilder. This has 4 required attributes:
documentClass - the class to use for the bottommost node of a document
elementClass - the class to use for HTML Elements
commentClass - the class to use for comments
doctypeClass - the class to use for doctypes
It also has one required method:
getDocument - Returns the root node of the complete document tree
3) If you wish to run the unit tests, you must also create a
testSerializer method on your treebuilder which accepts a node and
returns a string containing Node and its children serialized according
to the format used in the unittests
"""
from __future__ import absolute_import, division, unicode_literals
from ..utils import default_etree
treeBuilderCache = {}
def getTreeBuilder(treeType, implementation=None, **kwargs):
"""Get a TreeBuilder class for various types of tree with built-in support
treeType - the name of the tree type required (case-insensitive). Supported
values are:
"dom" - A generic builder for DOM implementations, defaulting to
a xml.dom.minidom based implementation.
"etree" - A generic builder for tree implementations exposing an
ElementTree-like interface, defaulting to
xml.etree.cElementTree if available and
xml.etree.ElementTree if not.
"lxml" - A etree-based builder for lxml.etree, handling
limitations of lxml's implementation.
implementation - (Currently applies to the "etree" and "dom" tree types). A
module implementing the tree type e.g.
xml.etree.ElementTree or xml.etree.cElementTree."""
treeType = treeType.lower()
if treeType not in treeBuilderCache:
if treeType == "dom":
from . import dom
# Come up with a sane default (pref. from the stdlib)
if implementation is None:
from xml.dom import minidom
implementation = minidom
# NEVER cache here, caching is done in the dom submodule
return dom.getDomModule(implementation, **kwargs).TreeBuilder
elif treeType == "lxml":
from . import etree_lxml
treeBuilderCache[treeType] = etree_lxml.TreeBuilder
elif treeType == "etree":
from . import etree
if implementation is None:
implementation = default_etree
# NEVER cache here, caching is done in the etree submodule
return etree.getETreeModule(implementation, **kwargs).TreeBuilder
else:
raise ValueError("""Unrecognised treebuilder "%s" """ % treeType)
return treeBuilderCache.get(treeType)
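# Illustrative sketch (not part of this module): typical use of getTreeBuilder(),
# assuming the full html5lib package is installed; the markup is hypothetical.
#
#   import html5lib
#   builder = html5lib.getTreeBuilder("etree")            # ElementTree-backed TreeBuilder class
#   parser = html5lib.HTMLParser(tree=builder)
#   document = parser.parse("<p>Hello <b>tree</b></p>")   # root of the parsed tree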
|
gpl-3.0
|
scenarios/tensorflow
|
tensorflow/contrib/bayesflow/python/kernel_tests/variational_inference_test.py
|
7
|
5651
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for variational inference."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
# TODO: #6568 Remove this hack that makes dlopen() not crash.
if hasattr(sys, "getdlopenflags") and hasattr(sys, "setdlopenflags"):
import ctypes
sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL)
from tensorflow.contrib import distributions as distributions_lib
from tensorflow.contrib import layers
from tensorflow.contrib.bayesflow.python.ops import stochastic_tensor
from tensorflow.contrib.bayesflow.python.ops import variational_inference
from tensorflow.contrib.distributions.python.ops import kullback_leibler
from tensorflow.contrib.distributions.python.ops import normal
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
st = stochastic_tensor
vi = variational_inference
distributions = distributions_lib
class NormalNoEntropy(distributions.Normal):
def entropy(self):
raise NotImplementedError("entropy not implemented")
# For mini-VAE
def inference_net(x, latent_size):
return layers.linear(x, latent_size)
def generative_net(z, data_size):
return layers.linear(z, data_size)
def mini_vae():
x = [[-6., 3., 6.], [-8., 4., 8.]]
prior = distributions.Normal(mu=0., sigma=1.)
variational = st.StochasticTensor(
distributions.Normal(
mu=inference_net(x, 1), sigma=1.))
vi.register_prior(variational, prior)
px = distributions.Normal(mu=generative_net(variational, 3), sigma=1.)
log_likelihood = math_ops.reduce_sum(px.log_prob(x), 1)
log_likelihood = array_ops.expand_dims(log_likelihood, -1)
return x, prior, variational, px, log_likelihood
class VariationalInferenceTest(test.TestCase):
def testDefaultVariationalAndPrior(self):
_, prior, variational, _, log_likelihood = mini_vae()
elbo = vi.elbo(log_likelihood)
expected_elbo = log_likelihood - kullback_leibler.kl(
variational.distribution, prior)
with self.test_session() as sess:
sess.run(variables.global_variables_initializer())
self.assertAllEqual(*sess.run([expected_elbo, elbo]))
def testExplicitVariationalAndPrior(self):
with self.test_session() as sess:
_, _, variational, _, log_likelihood = mini_vae()
prior = normal.Normal(mu=3., sigma=2.)
elbo = vi.elbo(
log_likelihood, variational_with_prior={variational: prior})
expected_elbo = log_likelihood - kullback_leibler.kl(
variational.distribution, prior)
sess.run(variables.global_variables_initializer())
self.assertAllEqual(*sess.run([expected_elbo, elbo]))
def testExplicitForms(self):
_, prior, variational, _, log_likelihood = mini_vae()
elbos = []
forms = vi.ELBOForms
for form in [
forms.default, forms.analytic_kl, forms.sample, forms.analytic_entropy
]:
elbo = vi.elbo(
log_likelihood=log_likelihood,
variational_with_prior={variational: prior},
form=form)
elbos.append(elbo)
with self.test_session() as sess:
sess.run(variables.global_variables_initializer())
log_likelihood_shape = array_ops.shape(log_likelihood).eval()
for elbo in elbos:
elbo.eval()
elbo_shape = array_ops.shape(elbo).eval()
self.assertAllEqual(log_likelihood_shape, elbo_shape)
self.assertEqual(elbo.dtype, log_likelihood.dtype)
def testDefaultsSampleKLWithoutAnalyticKLOrEntropy(self):
x = constant_op.constant([[-6., 3., 6.]])
prior = distributions.Bernoulli(0.5)
variational = st.StochasticTensor(
NormalNoEntropy(
mu=inference_net(x, 1), sigma=1.))
vi.register_prior(variational, prior)
px = distributions.Normal(mu=generative_net(variational, 3), sigma=1.)
log_likelihood = math_ops.reduce_sum(px.log_prob(x), 1)
# No analytic KL available between prior and variational distributions.
with self.assertRaisesRegexp(NotImplementedError, "No KL"):
distributions.kl(variational.distribution, prior)
elbo = vi.elbo(
variational_with_prior={variational: prior},
log_likelihood=log_likelihood)
expected_elbo = log_likelihood + prior.log_prob(
variational) - variational.distribution.log_prob(variational)
with self.test_session() as sess:
sess.run(variables.global_variables_initializer())
self.assertAllEqual(*sess.run([expected_elbo, elbo]))
def testElboWithLogJoint(self):
with self.test_session() as sess:
_, prior, variational, _, log_likelihood = mini_vae()
log_joint = log_likelihood + prior.log_prob(variational)
elbo = vi.elbo_with_log_joint(log_joint)
sess.run(variables.global_variables_initializer())
elbo.eval()
if __name__ == "__main__":
test.main()
|
apache-2.0
|
40223149/2015springfinal
|
static/Brython3.1.1-20150328-091302/Lib/reprlib.py
|
923
|
5110
|
"""Redo the builtin repr() (representation) but with limits on most sizes."""
__all__ = ["Repr", "repr", "recursive_repr"]
import builtins
from itertools import islice
try:
from _thread import get_ident
except ImportError:
from _dummy_thread import get_ident
def recursive_repr(fillvalue='...'):
'Decorator to make a repr function return fillvalue for a recursive call'
def decorating_function(user_function):
repr_running = set()
def wrapper(self):
key = id(self), get_ident()
if key in repr_running:
return fillvalue
repr_running.add(key)
try:
result = user_function(self)
finally:
repr_running.discard(key)
return result
# Can't use functools.wraps() here because of bootstrap issues
wrapper.__module__ = getattr(user_function, '__module__')
wrapper.__doc__ = getattr(user_function, '__doc__')
wrapper.__name__ = getattr(user_function, '__name__')
wrapper.__annotations__ = getattr(user_function, '__annotations__', {})
return wrapper
return decorating_function
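# A minimal usage sketch: recursive_repr() guards a __repr__ against
# self-referential containers. The Node class below is hypothetical and is
# shown only as a comment for illustration.
#
#     class Node:
#         def __init__(self):
#             self.child = self                 # deliberately self-referential
#         @recursive_repr()
#         def __repr__(self):
#             return 'Node(child=%r)' % (self.child,)
#
#     repr(Node())   # -> "Node(child=...)" instead of infinite recursion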
class Repr:
def __init__(self):
self.maxlevel = 6
self.maxtuple = 6
self.maxlist = 6
self.maxarray = 5
self.maxdict = 4
self.maxset = 6
self.maxfrozenset = 6
self.maxdeque = 6
self.maxstring = 30
self.maxlong = 40
self.maxother = 30
def repr(self, x):
return self.repr1(x, self.maxlevel)
def repr1(self, x, level):
typename = type(x).__name__
if ' ' in typename:
parts = typename.split()
typename = '_'.join(parts)
if hasattr(self, 'repr_' + typename):
return getattr(self, 'repr_' + typename)(x, level)
else:
return self.repr_instance(x, level)
def _repr_iterable(self, x, level, left, right, maxiter, trail=''):
n = len(x)
if level <= 0 and n:
s = '...'
else:
newlevel = level - 1
repr1 = self.repr1
pieces = [repr1(elem, newlevel) for elem in islice(x, maxiter)]
if n > maxiter: pieces.append('...')
s = ', '.join(pieces)
if n == 1 and trail: right = trail + right
return '%s%s%s' % (left, s, right)
def repr_tuple(self, x, level):
return self._repr_iterable(x, level, '(', ')', self.maxtuple, ',')
def repr_list(self, x, level):
return self._repr_iterable(x, level, '[', ']', self.maxlist)
def repr_array(self, x, level):
header = "array('%s', [" % x.typecode
return self._repr_iterable(x, level, header, '])', self.maxarray)
def repr_set(self, x, level):
x = _possibly_sorted(x)
return self._repr_iterable(x, level, 'set([', '])', self.maxset)
def repr_frozenset(self, x, level):
x = _possibly_sorted(x)
return self._repr_iterable(x, level, 'frozenset([', '])',
self.maxfrozenset)
def repr_deque(self, x, level):
return self._repr_iterable(x, level, 'deque([', '])', self.maxdeque)
def repr_dict(self, x, level):
n = len(x)
if n == 0: return '{}'
if level <= 0: return '{...}'
newlevel = level - 1
repr1 = self.repr1
pieces = []
for key in islice(_possibly_sorted(x), self.maxdict):
keyrepr = repr1(key, newlevel)
valrepr = repr1(x[key], newlevel)
pieces.append('%s: %s' % (keyrepr, valrepr))
if n > self.maxdict: pieces.append('...')
s = ', '.join(pieces)
return '{%s}' % (s,)
def repr_str(self, x, level):
s = builtins.repr(x[:self.maxstring])
if len(s) > self.maxstring:
i = max(0, (self.maxstring-3)//2)
j = max(0, self.maxstring-3-i)
s = builtins.repr(x[:i] + x[len(x)-j:])
s = s[:i] + '...' + s[len(s)-j:]
return s
def repr_int(self, x, level):
s = builtins.repr(x) # XXX Hope this isn't too slow...
if len(s) > self.maxlong:
i = max(0, (self.maxlong-3)//2)
j = max(0, self.maxlong-3-i)
s = s[:i] + '...' + s[len(s)-j:]
return s
def repr_instance(self, x, level):
try:
s = builtins.repr(x)
# Bugs in x.__repr__() can cause arbitrary
# exceptions -- then make up something
except Exception:
return '<%s instance at %x>' % (x.__class__.__name__, id(x))
if len(s) > self.maxother:
i = max(0, (self.maxother-3)//2)
j = max(0, self.maxother-3-i)
s = s[:i] + '...' + s[len(s)-j:]
return s
def _possibly_sorted(x):
# Since not all sequences of items can be sorted and comparison
# functions may raise arbitrary exceptions, return an unsorted
# sequence in that case.
try:
return sorted(x)
except Exception:
return list(x)
aRepr = Repr()
repr = aRepr.repr
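# A minimal usage sketch: the module-level repr() above truncates large
# containers according to the limits configured on aRepr (illustrative only).
#
#     repr(list(range(100)))    # -> '[0, 1, 2, 3, 4, 5, ...]'
#     aRepr.maxlist = 3
#     repr(list(range(100)))    # -> '[0, 1, 2, ...]'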
|
gpl-3.0
|
Kast0rTr0y/ansible
|
lib/ansible/modules/inventory/group_by.py
|
50
|
1473
|
# -*- mode: python -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['stableinterface'],
'supported_by': 'core',
'version': '1.0'}
DOCUMENTATION = '''
---
module: group_by
short_description: Create Ansible groups based on facts
description:
- Use facts to create ad-hoc groups that can be used later in a playbook.
version_added: "0.9"
options:
key:
description:
- The variables whose values will be used as groups
required: true
author: "Jeroen Hoekx (@jhoekx)"
notes:
- Spaces in group names are converted to dashes '-'.
'''
EXAMPLES = '''
# Create groups based on the machine architecture
- group_by:
key: machine_{{ ansible_machine }}
# Create groups like 'kvm-host'
- group_by:
key: virt_{{ ansible_virtualization_type }}_{{ ansible_virtualization_role }}
'''
|
gpl-3.0
|
ericchaves/faker
|
faker/providers/python/__init__.py
|
8
|
4613
|
# coding=utf-8
from __future__ import unicode_literals
from decimal import Decimal
import sys
from faker.providers.lorem.la import Provider as Lorem
from .. import BaseProvider
if sys.version_info[0] == 2:
string_types = (basestring,)
elif sys.version_info[0] == 3:
string_types = (str, bytes)
else:
raise SystemError("Unrecognized python version: {}".format(sys.version_info[0]))
class Provider(BaseProvider):
@classmethod
def pybool(cls):
return cls.random_int(0, 1) == 1
@classmethod
def pystr(cls, max_chars=20):
return Lorem.text(max_chars)
@classmethod
def pyfloat(cls, left_digits=None, right_digits=None, positive=False):
left_digits = left_digits or cls.random_int(1, sys.float_info.dig)
right_digits = right_digits or cls.random_int(0, sys.float_info.dig - left_digits)
sign = 1 if positive or cls.random_int(0, 1) else -1
return float("{0}.{1}".format(
sign * cls.random_number(left_digits), cls.random_number(right_digits)
))
@classmethod
def pyint(cls):
return cls.random_int()
@classmethod
def pydecimal(cls, left_digits=None, right_digits=None, positive=False):
return Decimal(str(cls.pyfloat(left_digits, right_digits, positive)))
def pytuple(self, nb_elements=10, variable_nb_elements=True, *value_types):
return tuple(self.pyset(nb_elements, variable_nb_elements, *value_types))
def pyset(self, nb_elements=10, variable_nb_elements=True, *value_types):
return set(self._pyiterable(nb_elements, variable_nb_elements, *value_types))
def pylist(self, nb_elements=10, variable_nb_elements=True, *value_types):
return list(self._pyiterable(nb_elements, variable_nb_elements, *value_types))
def pyiterable(self, nb_elements=10, variable_nb_elements=True, *value_types):
return self.random_element([self.pylist, self.pytuple, self.pyset])(nb_elements, variable_nb_elements, *value_types)
def _random_type(self, type_list):
value_type = self.random_element(type_list)
method_name = "py{0}".format(value_type)
if hasattr(self, method_name):
value_type = method_name
return self.generator.format(value_type)
def _pyiterable(self, nb_elements=10, variable_nb_elements=True, *value_types):
value_types = [t if isinstance(t, string_types) else getattr(t, '__name__', type(t).__name__).lower()
for t in value_types
# avoid recursion
if t not in ['iterable', 'list', 'tuple', 'dict', 'set']]
if not value_types:
value_types = ['str', 'str', 'str', 'str', 'float', 'int', 'int', 'decimal', 'date_time', 'uri', 'email']
if variable_nb_elements:
nb_elements = self.randomize_nb_elements(nb_elements)
for f in range(nb_elements):
yield self._random_type(value_types)
def pydict(self, nb_elements=10, variable_nb_elements=True, *value_types):
"""
Returns a dictionary of nb_elements entries, keyed by random lorem words,
with values of randomly chosen types drawn from value_types.
"""
if variable_nb_elements:
nb_elements = self.randomize_nb_elements(nb_elements)
return dict(zip(
Lorem.words(nb_elements),
self._pyiterable(nb_elements, False, *value_types)
))
def pystruct(self, count=10, *value_types):
value_types = [t if isinstance(t, string_types) else getattr(t, '__name__', type(t).__name__).lower()
for t in value_types
# avoid recursion
if t != 'struct']
if not value_types:
value_types = ['str', 'str', 'str', 'str', 'float', 'int', 'int', 'decimal', 'date_time', 'uri', 'email']
l = []
d = {}
nd = {}
for i in range(count):
d[Lorem.word()] = self._random_type(value_types)
l.append(self._random_type(value_types))
nd[Lorem.word()] = {
i: self._random_type(value_types),
i + 1: [self._random_type(value_types), self._random_type(value_types), self._random_type(value_types)],
i + 2: {
i: self._random_type(value_types),
i + 1: self._random_type(value_types),
i + 2: [
self._random_type(value_types),
self._random_type(value_types)
]
}
}
return l, d, nd
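# A minimal usage sketch, assuming the usual joke2k/faker entry point; the
# sample output below is illustrative only.
#
#     from faker import Faker
#     fake = Faker()
#     fake.pydict(3, False, 'int', 'str')
#     # -> e.g. {'dolor': 5207, 'amet': 'Quia quo minima.', 'sit': 940}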
|
mit
|
p4datasystems/CarnotKE
|
jyhton/Lib/test/test_sort.py
|
10
|
10661
|
from test import test_support
import random
import sys
import unittest
try:
import java
except ImportError:
pass
verbose = test_support.verbose
nerrors = 0
def check(tag, expected, raw, compare=None):
global nerrors
if verbose:
print " checking", tag
orig = raw[:] # save input in case of error
if compare:
raw.sort(compare)
else:
raw.sort()
if len(expected) != len(raw):
print "error in", tag
print "length mismatch;", len(expected), len(raw)
print expected
print orig
print raw
nerrors += 1
return
for i, good in enumerate(expected):
maybe = raw[i]
if good is not maybe:
print "error in", tag
print "out of order at index", i, good, maybe
print expected
print orig
print raw
nerrors += 1
return
class TestBase(unittest.TestCase):
def testStressfully(self):
# Try a variety of sizes at and around powers of 2, and at powers of 10.
sizes = [0]
for power in range(1, 10):
n = 2 ** power
sizes.extend(range(n-1, n+2))
sizes.extend([10, 100, 1000])
class Complains(object):
maybe_complain = True
def __init__(self, i):
self.i = i
def __lt__(self, other):
if Complains.maybe_complain and random.random() < 0.001:
if verbose:
print " complaining at", self, other
raise RuntimeError
return self.i < other.i
def __repr__(self):
return "Complains(%d)" % self.i
class Stable(object):
def __init__(self, key, i):
self.key = key
self.index = i
def __cmp__(self, other):
return cmp(self.key, other.key)
__hash__ = None # Silence Py3k warning
def __repr__(self):
return "Stable(%d, %d)" % (self.key, self.index)
for n in sizes:
x = range(n)
if verbose:
print "Testing size", n
s = x[:]
check("identity", x, s)
s = x[:]
s.reverse()
check("reversed", x, s)
s = x[:]
random.shuffle(s)
check("random permutation", x, s)
y = x[:]
y.reverse()
s = x[:]
check("reversed via function", y, s, lambda a, b: cmp(b, a))
if verbose:
print " Checking against an insane comparison function."
print " If the implementation isn't careful, this may segfault."
s = x[:]
if test_support.is_jython:
try:
s.sort(lambda a, b: int(random.random() * 3) - 1)
except java.lang.IllegalArgumentException:
pass
else:
s.sort(lambda a, b: int(random.random() * 3) - 1)
check("an insane function left some permutation", x, s)
x = [Complains(i) for i in x]
s = x[:]
random.shuffle(s)
Complains.maybe_complain = True
it_complained = False
try:
s.sort()
except RuntimeError:
it_complained = True
if it_complained:
Complains.maybe_complain = False
check("exception during sort left some permutation", x, s)
s = [Stable(random.randrange(10), i) for i in xrange(n)]
augmented = [(e, e.index) for e in s]
augmented.sort() # forced stable because ties broken by index
x = [e for e, i in augmented] # a stable sort of s
check("stability", x, s)
#==============================================================================
class TestBugs(unittest.TestCase):
def test_bug453523(self):
# bug 453523 -- list.sort() crasher.
# If this fails, the most likely outcome is a core dump.
# Mutations during a list sort should raise a ValueError.
class C:
def __lt__(self, other):
if L and random.random() < 0.75:
L.pop()
else:
L.append(3)
return random.random() < 0.5
L = [C() for i in range(50)]
self.assertRaises(ValueError, L.sort)
def test_cmpNone(self):
# Testing None as a comparison function.
L = range(50)
random.shuffle(L)
L.sort(None)
self.assertEqual(L, range(50))
def test_undetected_mutation(self):
# Python 2.4a1 did not always detect mutation
memorywaster = []
for i in range(20):
def mutating_cmp(x, y):
L.append(3)
L.pop()
return cmp(x, y)
L = [1,2]
self.assertRaises(ValueError, L.sort, mutating_cmp)
def mutating_cmp(x, y):
L.append(3)
del L[:]
return cmp(x, y)
self.assertRaises(ValueError, L.sort, mutating_cmp)
memorywaster = [memorywaster]
#==============================================================================
class TestDecorateSortUndecorate(unittest.TestCase):
def test_decorated(self):
data = 'The quick Brown fox Jumped over The lazy Dog'.split()
copy = data[:]
random.shuffle(data)
data.sort(key=str.lower)
copy.sort(cmp=lambda x,y: cmp(x.lower(), y.lower()))
def test_baddecorator(self):
data = 'The quick Brown fox Jumped over The lazy Dog'.split()
self.assertRaises(TypeError, data.sort, None, lambda x,y: 0)
def test_stability(self):
data = [(random.randrange(100), i) for i in xrange(200)]
copy = data[:]
data.sort(key=lambda x: x[0]) # sort on the random first field
copy.sort() # sort using both fields
self.assertEqual(data, copy) # should get the same result
def test_cmp_and_key_combination(self):
# Verify that the wrapper has been removed
def compare(x, y):
self.assertEqual(type(x), str)
self.assertEqual(type(y), str)
return cmp(x, y)
data = 'The quick Brown fox Jumped over The lazy Dog'.split()
data.sort(cmp=compare, key=str.lower)
def test_badcmp_with_key(self):
# Verify that the wrapper has been removed
data = 'The quick Brown fox Jumped over The lazy Dog'.split()
self.assertRaises(TypeError, data.sort, "bad", str.lower)
def test_key_with_exception(self):
# Verify that the wrapper has been removed
data = range(-2,2)
dup = data[:]
self.assertRaises(ZeroDivisionError, data.sort, None, lambda x: 1 // x)
self.assertEqual(data, dup)
# for jython, we have a different storage mechanism for this in our
# implementation of MergeState; given that this is likely to go away,
# this doesn't seem so important
@unittest.skipIf(test_support.is_jython,
"Jython has a different implementation of MergeSort")
def test_key_with_mutation(self):
data = range(10)
def k(x):
del data[:]
data[:] = range(20)
return x
self.assertRaises(ValueError, data.sort, key=k)
# The function passed to the "key" argument changes the data upon which
# sort is invoked. It cannot be checked whether that function changes the data
# while it is being invoked (e.g. __del__ in SortKiller), so skipping for now.
@unittest.skipIf(test_support.is_jython, "Doesn't work for Jython")
def test_key_with_mutating_del(self):
data = range(10)
class SortKiller(object):
def __init__(self, x):
pass
def __del__(self):
del data[:]
data[:] = range(20)
self.assertRaises(ValueError, data.sort, key=SortKiller)
# The function passed to the "key" argument changes the data upon which
# sort is invoked. It cannot be checked whether that function changes the data
# while it is being invoked (e.g. __del__ in SortKiller), so skipping for now.
@unittest.skipIf(test_support.is_jython, "Doesn't work for Jython")
def test_key_with_mutating_del_and_exception(self):
data = range(10)
## dup = data[:]
class SortKiller(object):
def __init__(self, x):
if x > 2:
raise RuntimeError
def __del__(self):
del data[:]
data[:] = range(20)
self.assertRaises(RuntimeError, data.sort, key=SortKiller)
## major honking subtlety: we *can't* do:
##
## self.assertEqual(data, dup)
##
## because there is a reference to a SortKiller in the
## traceback and by the time it dies we're outside the call to
## .sort() and so the list protection gimmicks are out of
## date (this cost some brain cells to figure out...).
def test_reverse(self):
data = range(100)
random.shuffle(data)
data.sort(reverse=True)
self.assertEqual(data, range(99,-1,-1))
self.assertRaises(TypeError, data.sort, "wrong type")
def test_reverse_stability(self):
data = [(random.randrange(100), i) for i in xrange(200)]
copy1 = data[:]
copy2 = data[:]
data.sort(cmp=lambda x,y: cmp(x[0],y[0]), reverse=True)
copy1.sort(cmp=lambda x,y: cmp(y[0],x[0]))
self.assertEqual(data, copy1)
copy2.sort(key=lambda x: x[0], reverse=True)
self.assertEqual(data, copy2)
#==============================================================================
def test_main(verbose=None):
test_classes = (
TestBase,
TestDecorateSortUndecorate,
TestBugs,
)
with test_support.check_py3k_warnings(
("the cmp argument is not supported", DeprecationWarning)):
test_support.run_unittest(*test_classes)
# verify reference counting
if verbose and hasattr(sys, "gettotalrefcount"):
import gc
counts = [None] * 5
for i in xrange(len(counts)):
test_support.run_unittest(*test_classes)
gc.collect()
counts[i] = sys.gettotalrefcount()
print counts
if __name__ == "__main__":
test_main(verbose=True)
|
apache-2.0
|
ltilve/ChromiumGStreamerBackend
|
chrome/common/extensions/docs/server2/rietveld_patcher_test.py
|
36
|
2773
|
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import posixpath
import sys
import unittest
from environment_wrappers import CreateUrlFetcher
from extensions_paths import (
ARTICLES_TEMPLATES, CHROME_EXTENSIONS, DOCS, JSON_TEMPLATES,
PUBLIC_TEMPLATES)
from fake_fetchers import ConfigureFakeFetchers
from file_system import FileNotFoundError
from rietveld_patcher import RietveldPatcher
from test_util import Server2Path
import url_constants
def _PrefixWith(prefix, lst):
return [posixpath.join(prefix, item) for item in lst]
class RietveldPatcherTest(unittest.TestCase):
def setUp(self):
ConfigureFakeFetchers()
self._patcher = RietveldPatcher(
'14096030',
CreateUrlFetcher(url_constants.CODEREVIEW_SERVER))
def _ReadLocalFile(self, filename):
with open(Server2Path('test_data',
'rietveld_patcher',
'expected',
filename), 'r') as f:
return f.read()
def _ApplySingle(self, path):
return self._patcher.Apply([path], None).Get()[path]
def testGetVersion(self):
self.assertEqual(self._patcher.GetVersion(), '22002')
def testGetPatchedFiles(self):
added, deleted, modified = self._patcher.GetPatchedFiles()
self.assertEqual(
sorted(added),
_PrefixWith(DOCS, ['examples/test',
'templates/articles/test_foo.html',
'templates/public/extensions/test_foo.html']))
self.assertEqual(deleted,
['%sextensions/runtime.html' % PUBLIC_TEMPLATES])
self.assertEqual(
sorted(modified),
_PrefixWith(CHROME_EXTENSIONS,
['api/test.json',
'docs/templates/json/extensions_sidenav.json',
'manifest.h']))
def testApply(self):
article_path = '%stest_foo.html' % ARTICLES_TEMPLATES
# Apply to an added file.
self.assertEqual(
self._ReadLocalFile('test_foo.html'),
self._ApplySingle('%sextensions/test_foo.html' % PUBLIC_TEMPLATES))
# Apply to a modified file.
self.assertEqual(
self._ReadLocalFile('extensions_sidenav.json'),
self._ApplySingle('%sextensions_sidenav.json' % JSON_TEMPLATES))
# Applying to a deleted file doesn't throw exceptions. It just returns
# empty content.
# self.assertRaises(FileNotFoundError, self._ApplySingle,
# 'docs/templates/public/extensions/runtime.html')
# Apply to an unknown file.
self.assertRaises(FileNotFoundError, self._ApplySingle, 'not_existing')
if __name__ == '__main__':
unittest.main()
|
bsd-3-clause
|
sillydan1/WhatEverEngine
|
openglcsharp/Lib/os.py
|
109
|
26300
|
r"""OS routines for Mac, NT, or Posix depending on what system we're on.
This exports:
- all functions from posix, nt, os2, or ce, e.g. unlink, stat, etc.
- os.path is one of the modules posixpath, or ntpath
- os.name is 'posix', 'nt', 'os2', 'ce' or 'riscos'
- os.curdir is a string representing the current directory ('.' or ':')
- os.pardir is a string representing the parent directory ('..' or '::')
- os.sep is the (or a most common) pathname separator ('/' or ':' or '\\')
- os.extsep is the extension separator ('.' or '/')
- os.altsep is the alternate pathname separator (None or '/')
- os.pathsep is the component separator used in $PATH etc
- os.linesep is the line separator in text files ('\r' or '\n' or '\r\n')
- os.defpath is the default search path for executables
- os.devnull is the file path of the null device ('/dev/null', etc.)
Programs that import and use 'os' stand a better chance of being
portable between different platforms. Of course, they must then
only use functions that are defined by all platforms (e.g., unlink
and opendir), and leave all pathname manipulation to os.path
(e.g., split and join).
"""
#'
import sys, errno
_names = sys.builtin_module_names
# Note: more names are added to __all__ later.
__all__ = ["altsep", "curdir", "pardir", "sep", "extsep", "pathsep", "linesep",
"defpath", "name", "path", "devnull",
"SEEK_SET", "SEEK_CUR", "SEEK_END"]
def _get_exports_list(module):
try:
return list(module.__all__)
except AttributeError:
return [n for n in dir(module) if n[0] != '_']
if 'posix' in _names:
name = 'posix'
linesep = '\n'
from posix import *
try:
from posix import _exit
except ImportError:
pass
import posixpath as path
import posix
__all__.extend(_get_exports_list(posix))
del posix
elif 'nt' in _names:
name = 'nt'
linesep = '\r\n'
from nt import *
try:
from nt import _exit
except ImportError:
pass
import ntpath as path
import nt
__all__.extend(_get_exports_list(nt))
del nt
elif 'os2' in _names:
name = 'os2'
linesep = '\r\n'
from os2 import *
try:
from os2 import _exit
except ImportError:
pass
if sys.version.find('EMX GCC') == -1:
import ntpath as path
else:
import os2emxpath as path
from _emx_link import link
import os2
__all__.extend(_get_exports_list(os2))
del os2
elif 'ce' in _names:
name = 'ce'
linesep = '\r\n'
from ce import *
try:
from ce import _exit
except ImportError:
pass
# We can use the standard Windows path.
import ntpath as path
import ce
__all__.extend(_get_exports_list(ce))
del ce
elif 'riscos' in _names:
name = 'riscos'
linesep = '\n'
from riscos import *
try:
from riscos import _exit
except ImportError:
pass
import riscospath as path
import riscos
__all__.extend(_get_exports_list(riscos))
del riscos
else:
raise ImportError, 'no os specific module found'
sys.modules['os.path'] = path
from os.path import (curdir, pardir, sep, pathsep, defpath, extsep, altsep,
devnull)
del _names
# Python uses fixed values for the SEEK_ constants; they are mapped
# to native constants if necessary in posixmodule.c
SEEK_SET = 0
SEEK_CUR = 1
SEEK_END = 2
#'
# Super directory utilities.
# (Inspired by Eric Raymond; the doc strings are mostly his)
def makedirs(name, mode=0777):
"""makedirs(path [, mode=0777])
Super-mkdir; create a leaf directory and all intermediate ones.
Works like mkdir, except that any intermediate path segment (not
just the rightmost) will be created if it does not exist. This is
recursive.
"""
head, tail = path.split(name)
if not tail:
head, tail = path.split(head)
if head and tail and not path.exists(head):
try:
makedirs(head, mode)
except OSError, e:
# be happy if someone already created the path
if e.errno != errno.EEXIST:
raise
if tail == curdir: # xxx/newdir/. exists if xxx/newdir exists
return
mkdir(name, mode)
def removedirs(name):
"""removedirs(path)
Super-rmdir; remove a leaf directory and all empty intermediate
ones. Works like rmdir except that, if the leaf directory is
successfully removed, directories corresponding to rightmost path
segments will be pruned away until either the whole path is
consumed or an error occurs. Errors during this latter phase are
ignored -- they generally mean that a directory was not empty.
"""
rmdir(name)
head, tail = path.split(name)
if not tail:
head, tail = path.split(head)
while head and tail:
try:
rmdir(head)
except error:
break
head, tail = path.split(head)
def renames(old, new):
"""renames(old, new)
Super-rename; create directories as necessary and delete any left
empty. Works like rename, except creation of any intermediate
directories needed to make the new pathname good is attempted
first. After the rename, directories corresponding to rightmost
path segments of the old name will be pruned away until either the
whole path is consumed or a nonempty directory is found.
Note: this function can fail with the new directory structure made
if you lack permissions needed to unlink the leaf directory or
file.
"""
head, tail = path.split(new)
if head and tail and not path.exists(head):
makedirs(head)
rename(old, new)
head, tail = path.split(old)
if head and tail:
try:
removedirs(head)
except error:
pass
__all__.extend(["makedirs", "removedirs", "renames"])
def walk(top, topdown=True, onerror=None, followlinks=False):
"""Directory tree generator.
For each directory in the directory tree rooted at top (including top
itself, but excluding '.' and '..'), yields a 3-tuple
dirpath, dirnames, filenames
dirpath is a string, the path to the directory. dirnames is a list of
the names of the subdirectories in dirpath (excluding '.' and '..').
filenames is a list of the names of the non-directory files in dirpath.
Note that the names in the lists are just names, with no path components.
To get a full path (which begins with top) to a file or directory in
dirpath, do os.path.join(dirpath, name).
If optional arg 'topdown' is true or not specified, the triple for a
directory is generated before the triples for any of its subdirectories
(directories are generated top down). If topdown is false, the triple
for a directory is generated after the triples for all of its
subdirectories (directories are generated bottom up).
When topdown is true, the caller can modify the dirnames list in-place
(e.g., via del or slice assignment), and walk will only recurse into the
subdirectories whose names remain in dirnames; this can be used to prune
the search, or to impose a specific order of visiting. Modifying
dirnames when topdown is false is ineffective, since the directories in
dirnames have already been generated by the time dirnames itself is
generated.
By default errors from the os.listdir() call are ignored. If
optional arg 'onerror' is specified, it should be a function; it
will be called with one argument, an os.error instance. It can
report the error to continue with the walk, or raise the exception
to abort the walk. Note that the filename is available as the
filename attribute of the exception object.
By default, os.walk does not follow symbolic links to subdirectories on
systems that support them. In order to get this functionality, set the
optional argument 'followlinks' to true.
Caution: if you pass a relative pathname for top, don't change the
current working directory between resumptions of walk. walk never
changes the current directory, and assumes that the client doesn't
either.
Example:
import os
from os.path import join, getsize
for root, dirs, files in os.walk('python/Lib/email'):
print root, "consumes",
print sum([getsize(join(root, name)) for name in files]),
print "bytes in", len(files), "non-directory files"
if 'CVS' in dirs:
dirs.remove('CVS') # don't visit CVS directories
"""
islink, join, isdir = path.islink, path.join, path.isdir
# We may not have read permission for top, in which case we can't
# get a list of the files the directory contains. os.path.walk
# always suppressed the exception then, rather than blow up for a
# minor reason when (say) a thousand readable directories are still
# left to visit. That logic is copied here.
try:
# Note that listdir and error are globals in this module due
# to earlier import-*.
names = listdir(top)
except error, err:
if onerror is not None:
onerror(err)
return
dirs, nondirs = [], []
for name in names:
if isdir(join(top, name)):
dirs.append(name)
else:
nondirs.append(name)
if topdown:
yield top, dirs, nondirs
for name in dirs:
new_path = join(top, name)
if followlinks or not islink(new_path):
for x in walk(new_path, topdown, onerror, followlinks):
yield x
if not topdown:
yield top, dirs, nondirs
__all__.append("walk")
# Make sure os.environ exists, at least
try:
environ
except NameError:
environ = {}
def execl(file, *args):
"""execl(file, *args)
Execute the executable file with argument list args, replacing the
current process. """
execv(file, args)
def execle(file, *args):
"""execle(file, *args, env)
Execute the executable file with argument list args and
environment env, replacing the current process. """
env = args[-1]
execve(file, args[:-1], env)
def execlp(file, *args):
"""execlp(file, *args)
Execute the executable file (which is searched for along $PATH)
with argument list args, replacing the current process. """
execvp(file, args)
def execlpe(file, *args):
"""execlpe(file, *args, env)
Execute the executable file (which is searched for along $PATH)
with argument list args and environment env, replacing the current
process. """
env = args[-1]
execvpe(file, args[:-1], env)
def execvp(file, args):
"""execvp(file, args)
Execute the executable file (which is searched for along $PATH)
with argument list args, replacing the current process.
args may be a list or tuple of strings. """
_execvpe(file, args)
def execvpe(file, args, env):
"""execvpe(file, args, env)
Execute the executable file (which is searched for along $PATH)
with argument list args and environment env , replacing the
current process.
args may be a list or tuple of strings. """
_execvpe(file, args, env)
__all__.extend(["execl","execle","execlp","execlpe","execvp","execvpe"])
def _execvpe(file, args, env=None):
if env is not None:
func = execve
argrest = (args, env)
else:
func = execv
argrest = (args,)
env = environ
head, tail = path.split(file)
if head:
func(file, *argrest)
return
if 'PATH' in env:
envpath = env['PATH']
else:
envpath = defpath
PATH = envpath.split(pathsep)
saved_exc = None
saved_tb = None
for dir in PATH:
fullname = path.join(dir, file)
try:
func(fullname, *argrest)
except error, e:
tb = sys.exc_info()[2]
if (e.errno != errno.ENOENT and e.errno != errno.ENOTDIR
and saved_exc is None):
saved_exc = e
saved_tb = tb
if saved_exc:
raise error, saved_exc, saved_tb
raise error, e, tb
# Change environ to automatically call putenv() if it exists
try:
# This will fail if there's no putenv
putenv
except NameError:
pass
else:
import UserDict
# Fake unsetenv() for Windows
# not sure about os2 here but
# I'm guessing they are the same.
if name in ('os2', 'nt'):
def unsetenv(key):
putenv(key, "")
if name == "riscos":
# On RISC OS, all env access goes through getenv and putenv
from riscosenviron import _Environ
elif name in ('os2', 'nt'): # Where Env Var Names Must Be UPPERCASE
# But we store them as upper case
class _Environ(UserDict.IterableUserDict):
def __init__(self, environ):
UserDict.UserDict.__init__(self)
data = self.data
for k, v in environ.items():
data[k.upper()] = v
def __setitem__(self, key, item):
putenv(key, item)
self.data[key.upper()] = item
def __getitem__(self, key):
return self.data[key.upper()]
try:
unsetenv
except NameError:
def __delitem__(self, key):
del self.data[key.upper()]
else:
def __delitem__(self, key):
unsetenv(key)
del self.data[key.upper()]
def clear(self):
for key in self.data.keys():
unsetenv(key)
del self.data[key]
def pop(self, key, *args):
unsetenv(key)
return self.data.pop(key.upper(), *args)
def has_key(self, key):
return key.upper() in self.data
def __contains__(self, key):
return key.upper() in self.data
def get(self, key, failobj=None):
return self.data.get(key.upper(), failobj)
def update(self, dict=None, **kwargs):
if dict:
try:
keys = dict.keys()
except AttributeError:
# List of (key, value)
for k, v in dict:
self[k] = v
else:
# got keys
# cannot use items(), since mappings
# may not have them.
for k in keys:
self[k] = dict[k]
if kwargs:
self.update(kwargs)
def copy(self):
return dict(self)
else: # Where Env Var Names Can Be Mixed Case
class _Environ(UserDict.IterableUserDict):
def __init__(self, environ):
UserDict.UserDict.__init__(self)
self.data = environ
def __setitem__(self, key, item):
putenv(key, item)
self.data[key] = item
def update(self, dict=None, **kwargs):
if dict:
try:
keys = dict.keys()
except AttributeError:
# List of (key, value)
for k, v in dict:
self[k] = v
else:
# got keys
# cannot use items(), since mappings
# may not have them.
for k in keys:
self[k] = dict[k]
if kwargs:
self.update(kwargs)
try:
unsetenv
except NameError:
pass
else:
def __delitem__(self, key):
unsetenv(key)
del self.data[key]
def clear(self):
for key in self.data.keys():
unsetenv(key)
del self.data[key]
def pop(self, key, *args):
unsetenv(key)
return self.data.pop(key, *args)
def copy(self):
return dict(self)
environ = _Environ(environ)
def getenv(key, default=None):
"""Get an environment variable, return None if it doesn't exist.
The optional second argument can specify an alternate default."""
return environ.get(key, default)
__all__.append("getenv")
def _exists(name):
return name in globals()
# Supply spawn*() (probably only for Unix)
if _exists("fork") and not _exists("spawnv") and _exists("execv"):
P_WAIT = 0
P_NOWAIT = P_NOWAITO = 1
# XXX Should we support P_DETACH? I suppose it could fork()**2
# and close the std I/O streams. Also, P_OVERLAY is the same
# as execv*()?
def _spawnvef(mode, file, args, env, func):
# Internal helper; func is the exec*() function to use
pid = fork()
if not pid:
# Child
try:
if env is None:
func(file, args)
else:
func(file, args, env)
except:
_exit(127)
else:
# Parent
if mode == P_NOWAIT:
return pid # Caller is responsible for waiting!
while 1:
wpid, sts = waitpid(pid, 0)
if WIFSTOPPED(sts):
continue
elif WIFSIGNALED(sts):
return -WTERMSIG(sts)
elif WIFEXITED(sts):
return WEXITSTATUS(sts)
else:
raise error, "Not stopped, signaled or exited???"
def spawnv(mode, file, args):
"""spawnv(mode, file, args) -> integer
Execute file with arguments from args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, None, execv)
def spawnve(mode, file, args, env):
"""spawnve(mode, file, args, env) -> integer
Execute file with arguments from args in a subprocess with the
specified environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, env, execve)
# Note: spawnvp[e] isn't currently supported on Windows
def spawnvp(mode, file, args):
"""spawnvp(mode, file, args) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, None, execvp)
def spawnvpe(mode, file, args, env):
"""spawnvpe(mode, file, args, env) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess with the supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, env, execvpe)
if _exists("spawnv"):
# These aren't supplied by the basic Windows code
# but can be easily implemented in Python
def spawnl(mode, file, *args):
"""spawnl(mode, file, *args) -> integer
Execute file with arguments from args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return spawnv(mode, file, args)
def spawnle(mode, file, *args):
"""spawnle(mode, file, *args, env) -> integer
Execute file with arguments from args in a subprocess with the
supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
env = args[-1]
return spawnve(mode, file, args[:-1], env)
__all__.extend(["spawnv", "spawnve", "spawnl", "spawnle",])
if _exists("spawnvp"):
# At the moment, Windows doesn't implement spawnvp[e],
# so it won't have spawnlp[e] either.
def spawnlp(mode, file, *args):
"""spawnlp(mode, file, *args) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return spawnvp(mode, file, args)
def spawnlpe(mode, file, *args):
"""spawnlpe(mode, file, *args, env) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess with the supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
env = args[-1]
return spawnvpe(mode, file, args[:-1], env)
__all__.extend(["spawnvp", "spawnvpe", "spawnlp", "spawnlpe",])
# Supply popen2 etc. (for Unix)
if _exists("fork"):
if not _exists("popen2"):
def popen2(cmd, mode="t", bufsize=-1):
"""Execute the shell command 'cmd' in a sub-process. On UNIX, 'cmd'
may be a sequence, in which case arguments will be passed directly to
the program without shell intervention (as with os.spawnv()). If 'cmd'
is a string it will be passed to the shell (as with os.system()). If
'bufsize' is specified, it sets the buffer size for the I/O pipes. The
file objects (child_stdin, child_stdout) are returned."""
import warnings
msg = "os.popen2 is deprecated. Use the subprocess module."
warnings.warn(msg, DeprecationWarning, stacklevel=2)
import subprocess
PIPE = subprocess.PIPE
p = subprocess.Popen(cmd, shell=isinstance(cmd, basestring),
bufsize=bufsize, stdin=PIPE, stdout=PIPE,
close_fds=True)
return p.stdin, p.stdout
__all__.append("popen2")
if not _exists("popen3"):
def popen3(cmd, mode="t", bufsize=-1):
"""Execute the shell command 'cmd' in a sub-process. On UNIX, 'cmd'
may be a sequence, in which case arguments will be passed directly to
the program without shell intervention (as with os.spawnv()). If 'cmd'
is a string it will be passed to the shell (as with os.system()). If
'bufsize' is specified, it sets the buffer size for the I/O pipes. The
file objects (child_stdin, child_stdout, child_stderr) are returned."""
import warnings
msg = "os.popen3 is deprecated. Use the subprocess module."
warnings.warn(msg, DeprecationWarning, stacklevel=2)
import subprocess
PIPE = subprocess.PIPE
p = subprocess.Popen(cmd, shell=isinstance(cmd, basestring),
bufsize=bufsize, stdin=PIPE, stdout=PIPE,
stderr=PIPE, close_fds=True)
return p.stdin, p.stdout, p.stderr
__all__.append("popen3")
if not _exists("popen4"):
def popen4(cmd, mode="t", bufsize=-1):
"""Execute the shell command 'cmd' in a sub-process. On UNIX, 'cmd'
may be a sequence, in which case arguments will be passed directly to
the program without shell intervention (as with os.spawnv()). If 'cmd'
is a string it will be passed to the shell (as with os.system()). If
'bufsize' is specified, it sets the buffer size for the I/O pipes. The
file objects (child_stdin, child_stdout_stderr) are returned."""
import warnings
msg = "os.popen4 is deprecated. Use the subprocess module."
warnings.warn(msg, DeprecationWarning, stacklevel=2)
import subprocess
PIPE = subprocess.PIPE
p = subprocess.Popen(cmd, shell=isinstance(cmd, basestring),
bufsize=bufsize, stdin=PIPE, stdout=PIPE,
stderr=subprocess.STDOUT, close_fds=True)
return p.stdin, p.stdout
__all__.append("popen4")
import copy_reg as _copy_reg
def _make_stat_result(tup, dict):
return stat_result(tup, dict)
def _pickle_stat_result(sr):
(type, args) = sr.__reduce__()
return (_make_stat_result, args)
try:
_copy_reg.pickle(stat_result, _pickle_stat_result, _make_stat_result)
except NameError: # stat_result may not exist
pass
def _make_statvfs_result(tup, dict):
return statvfs_result(tup, dict)
def _pickle_statvfs_result(sr):
(type, args) = sr.__reduce__()
return (_make_statvfs_result, args)
try:
_copy_reg.pickle(statvfs_result, _pickle_statvfs_result,
_make_statvfs_result)
except NameError: # statvfs_result may not exist
pass
if not _exists("urandom"):
def urandom(n):
"""urandom(n) -> str
Return a string of n random bytes suitable for cryptographic use.
"""
try:
_urandomfd = open("/dev/urandom", O_RDONLY)
except (OSError, IOError):
raise NotImplementedError("/dev/urandom (or equivalent) not found")
try:
bs = b""
while n > len(bs):
bs += read(_urandomfd, n - len(bs))
finally:
close(_urandomfd)
return bs
|
apache-2.0
|
40223250/40223250
|
wsgi/application_orig.py
|
135
|
4562
|
################################# 1. Declare the source encoding and import the required modules
#coding=utf-8
import cherrypy
import random
# for path setup
import os
# for mako
from mako.lookup import TemplateLookup
################################# 2. Global settings: local and remote directory configuration
cwd = os.getcwd()
if 'OPENSHIFT_REPO_DIR' in os.environ.keys():
# The program is running in the cloud (OpenShift)
template_root_dir = os.environ['OPENSHIFT_REPO_DIR']+"/wsgi/static"
data_dir = os.environ['OPENSHIFT_DATA_DIR']
else:
# The program is running locally
template_root_dir = cwd+"/static"
data_dir = cwd+"/local_data"
################################# 3. Define the main class Guess
class Guess(object):
# The target answer must be stored via the session mechanism
_cp_config = {
# Handle utf-8 form content
# Without the utf-8 encoding setting, the form cannot accept Chinese input
'tools.encode.encoding': 'utf-8',
# Enable session support
'tools.sessions.on' : True,
'tools.sessions.storage_type' : 'file',
'tools.sessions.locking' : 'early',
'tools.sessions.storage_path' : data_dir+'/tmp',
# The default session timeout is 60 minutes
'tools.sessions.timeout' : 60,
'tools.mako.directories' : template_root_dir+"/templates"
}
def __init__(self):
if not os.path.isdir(data_dir+"/tmp"):
try:
os.makedirs(data_dir+"/tmp")
except:
print("mkdir error")
@cherrypy.expose
def index(self, guess=None):
# Store the target answer in the 'answer' session entry
theanswer = random.randint(1, 100)
thecount = 0
# Save the answer and the guess counter into the corresponding session variables
cherrypy.session['answer'] = theanswer
cherrypy.session['count'] = thecount
套稿查詢 = TemplateLookup(directories=[template_root_dir+"/templates"])
# index.html must be loaded from the templates directory
內建頁面 = 套稿查詢.get_template("index.html")
return 內建頁面.render()
@cherrypy.expose
def default(self, attr='default'):
# Built-in default method, invoked when no matching handler method is found
套稿查詢 = TemplateLookup(directories=[template_root_dir+"/templates"])
# default.html must be loaded from the templates directory
內建頁面 = 套稿查詢.get_template("default.html")
return 內建頁面.render()
@cherrypy.expose
def doCheck(self, guess=None):
# If the user invokes doCheck directly, redirect back to the root handler
if guess is None:
raise cherrypy.HTTPRedirect("/")
# Retrieve the 'answer' value from the session, handling the case where doCheck is invoked directly and no session value is available
try:
theanswer = int(cherrypy.session.get('answer'))
except:
raise cherrypy.HTTPRedirect("/")
套稿查詢 = TemplateLookup(directories=[template_root_dir+"/templates"])
# docheck.html must be loaded from the templates directory
內建頁面 = 套稿查詢.get_template("docheck.html")
# The guess value obtained from the form is of type string
try:
theguess = int(guess)
except:
return 內建頁面.render(輸入="error")
cherrypy.session['count'] += 1
if theanswer < theguess:
return 內建頁面.render(輸入="big", theanswer=theanswer)
elif theanswer > theguess:
return 內建頁面.render(輸入="small", theanswer=theanswer)
else:
thecount = cherrypy.session.get('count')
return 內建頁面.render(輸入="exact", theanswer=theanswer, thecount=thecount)
@cherrypy.expose
def mytest(self):
套稿查詢 = TemplateLookup(directories=[template_root_dir+"/templates"])
# mytest.html must be loaded from the templates directory
內建頁面 = 套稿查詢.get_template("mytest.html")
return 內建頁面.render()
################################# 4. Application startup configuration and execution
root = Guess()
application_conf = {# Map the static templates file directory
'/templates':{
'tools.staticdir.on': True,
'tools.staticdir.root': template_root_dir,
'tools.staticdir.dir': 'templates',
'tools.staticdir.index' : 'index.htm'
}
}
if 'OPENSHIFT_REPO_DIR' in os.environ.keys():
# Running on OpenShift
application = cherrypy.Application(root, config = application_conf)
else:
# Running locally
cherrypy.quickstart(root, config = application_conf)
|
gpl-3.0
|
bdoner/SickRage
|
lib/hachoir_core/field/string_field.py
|
86
|
14829
|
"""
String field classes:
- String: Fixed length string (no prefix/no suffix) ;
- CString: String which ends with nul byte ("\0") ;
- UnixLine: Unix line of text, string which ends with "\n" ;
- PascalString8, PascalString16, PascalString32: String prefixed with
length written in a 8, 16, 32-bit integer (use parent endian).
Constructor has optional arguments:
- strip: value can be a string or True ;
- charset: if set, convert string to unicode using this charset (in "replace"
mode which replace all buggy characters with ".").
Note: For PascalStringXX, prefixed value is the number of bytes and not
of characters!
"""
from hachoir_core.field import FieldError, Bytes
from hachoir_core.endian import LITTLE_ENDIAN, BIG_ENDIAN
from hachoir_core.tools import alignValue, makePrintable
from hachoir_core.i18n import guessBytesCharset, _
from hachoir_core import config
from codecs import BOM_UTF16_LE, BOM_UTF16_BE, BOM_UTF32_LE, BOM_UTF32_BE
# Default charset used to convert byte string to Unicode
# This charset is used if no charset is specified or on conversion error
FALLBACK_CHARSET = "ISO-8859-1"
class GenericString(Bytes):
"""
Generic string class.
charset has to be in CHARSET_8BIT or in UTF_CHARSET.
"""
VALID_FORMATS = ("C", "UnixLine",
"fixed", "Pascal8", "Pascal16", "Pascal32")
# 8-bit charsets
CHARSET_8BIT = set((
"ASCII", # ANSI X3.4-1968
"MacRoman",
"CP037", # EBCDIC 037
"CP874", # Thai
"WINDOWS-1250", # Central Europe
"WINDOWS-1251", # Cyrillic
"WINDOWS-1252", # Latin I
"WINDOWS-1253", # Greek
"WINDOWS-1254", # Turkish
"WINDOWS-1255", # Hebrew
"WINDOWS-1256", # Arabic
"WINDOWS-1257", # Baltic
"WINDOWS-1258", # Vietnam
"ISO-8859-1", # Latin-1
"ISO-8859-2", # Latin-2
"ISO-8859-3", # Latin-3
"ISO-8859-4", # Latin-4
"ISO-8859-5",
"ISO-8859-6",
"ISO-8859-7",
"ISO-8859-8",
"ISO-8859-9", # Latin-5
"ISO-8859-10", # Latin-6
"ISO-8859-11", # Thai
"ISO-8859-13", # Latin-7
"ISO-8859-14", # Latin-8
"ISO-8859-15", # Latin-9 or ("Latin-0")
"ISO-8859-16", # Latin-10
))
# UTF-xx charset family
UTF_CHARSET = {
"UTF-8": (8, None),
"UTF-16-LE": (16, LITTLE_ENDIAN),
"UTF-32LE": (32, LITTLE_ENDIAN),
"UTF-16-BE": (16, BIG_ENDIAN),
"UTF-32BE": (32, BIG_ENDIAN),
"UTF-16": (16, "BOM"),
"UTF-32": (32, "BOM"),
}
# UTF-xx BOM => charset with endian
UTF_BOM = {
16: {BOM_UTF16_LE: "UTF-16-LE", BOM_UTF16_BE: "UTF-16-BE"},
32: {BOM_UTF32_LE: "UTF-32LE", BOM_UTF32_BE: "UTF-32BE"},
}
# Suffix format: value is suffix (string)
SUFFIX_FORMAT = {
"C": {
8: {LITTLE_ENDIAN: "\0", BIG_ENDIAN: "\0"},
16: {LITTLE_ENDIAN: "\0\0", BIG_ENDIAN: "\0\0"},
32: {LITTLE_ENDIAN: "\0\0\0\0", BIG_ENDIAN: "\0\0\0\0"},
},
"UnixLine": {
8: {LITTLE_ENDIAN: "\n", BIG_ENDIAN: "\n"},
16: {LITTLE_ENDIAN: "\n\0", BIG_ENDIAN: "\0\n"},
32: {LITTLE_ENDIAN: "\n\0\0\0", BIG_ENDIAN: "\0\0\0\n"},
},
}
# Pascal format: value is the size of the prefix in bits
PASCAL_FORMATS = {
"Pascal8": 1,
"Pascal16": 2,
"Pascal32": 4
}
# Raw value: with prefix and suffix, not stripped,
# and not converted to Unicode
_raw_value = None
def __init__(self, parent, name, format, description=None,
strip=None, charset=None, nbytes=None, truncate=None):
Bytes.__init__(self, parent, name, 1, description)
# Is format valid?
assert format in self.VALID_FORMATS
# Store options
self._format = format
self._strip = strip
self._truncate = truncate
# Check charset and compute character size in bytes
# (or None when it's not possible to guess character size)
if not charset or charset in self.CHARSET_8BIT:
self._character_size = 1 # one byte per character
elif charset in self.UTF_CHARSET:
self._character_size = None
else:
raise FieldError("Invalid charset for %s: \"%s\"" %
(self.path, charset))
self._charset = charset
# Is it a fixed string?
if nbytes is not None:
assert self._format == "fixed"
# Arbitrary limits, just to catch some bugs...
if not (1 <= nbytes <= 0xffff):
raise FieldError("Invalid string size for %s: %s" %
(self.path, nbytes))
self._content_size = nbytes # content length in bytes
self._size = nbytes * 8
self._content_offset = 0
else:
# Format with a suffix: Find the end of the string
if self._format in self.SUFFIX_FORMAT:
self._content_offset = 0
# Choose the suffix
suffix = self.suffix_str
# Find the suffix
length = self._parent.stream.searchBytesLength(
suffix, False, self.absolute_address)
if length is None:
raise FieldError("Unable to find end of string %s (format %s)!"
% (self.path, self._format))
if 1 < len(suffix):
# Fix length for little endian bug with UTF-xx charset:
# u"abc" -> "a\0b\0c\0\0\0" (UTF-16-LE)
# search returns length=5, whereas real length is 6
length = alignValue(length, len(suffix))
# Compute sizes
self._content_size = length # in bytes
self._size = (length + len(suffix)) * 8
# Format with a prefix: Read prefixed length in bytes
else:
assert self._format in self.PASCAL_FORMATS
# Get the prefix size
prefix_size = self.PASCAL_FORMATS[self._format]
self._content_offset = prefix_size
# Read the prefix and compute sizes
value = self._parent.stream.readBits(
self.absolute_address, prefix_size*8, self._parent.endian)
self._content_size = value # in bytes
self._size = (prefix_size + value) * 8
# For UTF-16 and UTF-32, choose the right charset using BOM
if self._charset in self.UTF_CHARSET:
# Charset requires a BOM?
bomsize, endian = self.UTF_CHARSET[self._charset]
if endian == "BOM":
# Read the BOM value
nbytes = bomsize // 8
bom = self._parent.stream.readBytes(self.absolute_address, nbytes)
# Choose right charset using the BOM
bom_endian = self.UTF_BOM[bomsize]
if bom not in bom_endian:
raise FieldError("String %s has invalid BOM (%s)!"
% (self.path, repr(bom)))
self._charset = bom_endian[bom]
self._content_size -= nbytes
self._content_offset += nbytes
# Compute length in characters if possible
if self._character_size:
self._length = self._content_size // self._character_size
else:
self._length = None
@staticmethod
def staticSuffixStr(format, charset, endian):
if format not in GenericString.SUFFIX_FORMAT:
return ''
suffix = GenericString.SUFFIX_FORMAT[format]
if charset in GenericString.UTF_CHARSET:
suffix_size = GenericString.UTF_CHARSET[charset][0]
suffix = suffix[suffix_size]
else:
suffix = suffix[8]
return suffix[endian]
def _getSuffixStr(self):
return self.staticSuffixStr(
self._format, self._charset, self._parent.endian)
suffix_str = property(_getSuffixStr)
def _convertText(self, text):
if not self._charset:
# charset is still unknown: guess the charset
self._charset = guessBytesCharset(text, default=FALLBACK_CHARSET)
# Try to convert to Unicode
try:
return unicode(text, self._charset, "strict")
except UnicodeDecodeError, err:
pass
#--- Conversion error ---
# Fix truncated UTF-16 string like 'B\0e' (3 bytes)
# => Add missing nul byte: 'B\0e\0' (4 bytes)
if err.reason == "truncated data" \
and err.end == len(text) \
and self._charset == "UTF-16-LE":
try:
text = unicode(text+"\0", self._charset, "strict")
self.warning("Fix truncated %s string: add missing nul byte" % self._charset)
return text
except UnicodeDecodeError, err:
pass
# On error, use FALLBACK_CHARSET
self.warning(u"Unable to convert string to Unicode: %s" % err)
return unicode(text, FALLBACK_CHARSET, "strict")
def _guessCharset(self):
addr = self.absolute_address + self._content_offset * 8
bytes = self._parent.stream.readBytes(addr, self._content_size)
return guessBytesCharset(bytes, default=FALLBACK_CHARSET)
def createValue(self, human=True):
# Compute data address (in bits) and size (in bytes)
if human:
addr = self.absolute_address + self._content_offset * 8
size = self._content_size
else:
addr = self.absolute_address
size = self._size // 8
if size == 0:
# Empty string
return u""
# Read bytes in data stream
text = self._parent.stream.readBytes(addr, size)
# Don't transform data?
if not human:
return text
# Convert text to Unicode
text = self._convertText(text)
# Truncate
if self._truncate:
pos = text.find(self._truncate)
if 0 <= pos:
text = text[:pos]
# Strip string if needed
if self._strip:
if isinstance(self._strip, (str, unicode)):
text = text.strip(self._strip)
else:
text = text.strip()
assert isinstance(text, unicode)
return text
def createDisplay(self, human=True):
if not human:
if self._raw_value is None:
self._raw_value = GenericString.createValue(self, False)
value = makePrintable(self._raw_value, "ASCII", to_unicode=True)
elif self._charset:
value = makePrintable(self.value, "ISO-8859-1", to_unicode=True)
else:
value = self.value
if config.max_string_length < len(value):
# Truncate string if needed
value = "%s(...)" % value[:config.max_string_length]
if not self._charset or not human:
return makePrintable(value, "ASCII", quote='"', to_unicode=True)
else:
if value:
return '"%s"' % value.replace('"', '\\"')
else:
return _("(empty)")
def createRawDisplay(self):
return GenericString.createDisplay(self, human=False)
def _getLength(self):
if self._length is None:
self._length = len(self.value)
return self._length
length = property(_getLength, doc="String length in characters")
def _getFormat(self):
return self._format
format = property(_getFormat, doc="String format (eg. 'C')")
def _getCharset(self):
if not self._charset:
self._charset = self._guessCharset()
return self._charset
charset = property(_getCharset, doc="String charset (eg. 'ISO-8859-1')")
def _getContentSize(self):
return self._content_size
content_size = property(_getContentSize, doc="Content size in bytes")
def _getContentOffset(self):
return self._content_offset
content_offset = property(_getContentOffset, doc="Content offset in bytes")
def getFieldType(self):
info = self.charset
if self._strip:
if isinstance(self._strip, (str, unicode)):
info += ",strip=%s" % makePrintable(self._strip, "ASCII", quote="'")
else:
info += ",strip=True"
return "%s<%s>" % (Bytes.getFieldType(self), info)
def stringFactory(name, format, doc):
class NewString(GenericString):
__doc__ = doc
def __init__(self, parent, name, description=None,
strip=None, charset=None, truncate=None):
GenericString.__init__(self, parent, name, format, description,
strip=strip, charset=charset, truncate=truncate)
cls = NewString
cls.__name__ = name
return cls
# String which ends with nul byte ("\0")
CString = stringFactory("CString", "C",
r"""C string: string ending with nul byte.
See GenericString to get more information.""")
# Unix line of text: string which ends with "\n" (ASCII 0x0A)
UnixLine = stringFactory("UnixLine", "UnixLine",
r"""Unix line: string ending with "\n" (ASCII code 10).
See GenericString to get more information.""")
# String prefixed with length written in a 8-bit integer
PascalString8 = stringFactory("PascalString8", "Pascal8",
r"""Pascal string: string prefixed with 8-bit integer containing its length (endian depends on parent endian).
See GenericString to get more information.""")
# String prefixed with length written in a 16-bit integer (use parent endian)
PascalString16 = stringFactory("PascalString16", "Pascal16",
r"""Pascal string: string prefixed with 16-bit integer containing its length (endian depends on parent endian).
See GenericString to get more information.""")
# String prefixed with length written in a 32-bit integer (use parent endian)
PascalString32 = stringFactory("PascalString32", "Pascal32",
r"""Pascal string: string prefixed with 32-bit integer containing its length (endian depends on parent endian).
See GenericString to get more information.""")
class String(GenericString):
"""
String with fixed size (size in bytes).
See GenericString to get more information.
"""
static_size = staticmethod(lambda *args, **kw: args[1]*8)
def __init__(self, parent, name, nbytes, description=None,
strip=None, charset=None, truncate=None):
GenericString.__init__(self, parent, name, "fixed", description,
strip=strip, charset=charset, nbytes=nbytes, truncate=truncate)
String.__name__ = "FixedString"
|
gpl-3.0
|
marcelomiky/PythonCodes
|
Intro ML Semcomp/semcomp17_ml/venv/lib/python3.5/site-packages/wheel/signatures/keys.py
|
471
|
3320
|
"""Store and retrieve wheel signing / verifying keys.
Given a scope (a package name, + meaning "all packages", or - meaning
"no packages"), return a list of verifying keys that are trusted for that
scope.
Given a package name, return a list of (scope, key) suggested keys to sign
that package (only the verifying keys; the private signing key is stored
elsewhere).
Keys here are represented as urlsafe_b64encoded strings with no padding.
Tentative command line interface:
# list trusts
wheel trust
# trust a particular key for all
wheel trust + key
# trust key for beaglevote
wheel trust beaglevote key
# stop trusting a key for all
wheel untrust + key
# generate a key pair
wheel keygen
# import a signing key from a file
wheel import keyfile
# export a signing key
wheel export key
"""
import json
import os.path
from wheel.util import native, load_config_paths, save_config_path
class WheelKeys(object):
SCHEMA = 1
CONFIG_NAME = 'wheel.json'
def __init__(self):
self.data = {'signers':[], 'verifiers':[]}
def load(self):
# XXX JSON is not a great database
for path in load_config_paths('wheel'):
conf = os.path.join(native(path), self.CONFIG_NAME)
if os.path.exists(conf):
with open(conf, 'r') as infile:
self.data = json.load(infile)
for x in ('signers', 'verifiers'):
if x not in self.data:
self.data[x] = []
if 'schema' not in self.data:
self.data['schema'] = self.SCHEMA
elif self.data['schema'] != self.SCHEMA:
raise ValueError(
"Bad wheel.json version {0}, expected {1}".format(
self.data['schema'], self.SCHEMA))
break
return self
def save(self):
# Try not to call this a very long time after load()
path = save_config_path('wheel')
conf = os.path.join(native(path), self.CONFIG_NAME)
with open(conf, 'w+') as out:
json.dump(self.data, out, indent=2)
return self
def trust(self, scope, vk):
"""Start trusting a particular key for given scope."""
self.data['verifiers'].append({'scope':scope, 'vk':vk})
return self
def untrust(self, scope, vk):
"""Stop trusting a particular key for given scope."""
self.data['verifiers'].remove({'scope':scope, 'vk':vk})
return self
def trusted(self, scope=None):
"""Return list of [(scope, trusted key), ...] for given scope."""
trust = [(x['scope'], x['vk']) for x in self.data['verifiers'] if x['scope'] in (scope, '+')]
trust.sort(key=lambda x: x[0])
trust.reverse()
return trust
def signers(self, scope):
"""Return list of signing key(s)."""
sign = [(x['scope'], x['vk']) for x in self.data['signers'] if x['scope'] in (scope, '+')]
sign.sort(key=lambda x: x[0])
sign.reverse()
return sign
def add_signer(self, scope, vk):
"""Remember verifying key vk as being valid for signing in scope."""
self.data['signers'].append({'scope':scope, 'vk':vk})
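# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). The key string
# below is a made-up placeholder for a urlsafe_b64encoded verifying key.
# ---------------------------------------------------------------------------
def _example_usage():
    wk = WheelKeys().load()
    vk = 'EXAMPLE-urlsafe-b64-verifying-key'   # hypothetical key material
    wk.trust('+', vk)                           # trust this key for all packages
    wk.trust('beaglevote', vk)                  # and explicitly for one package
    assert wk.trusted('beaglevote') == [('beaglevote', vk), ('+', vk)]
    wk.untrust('+', vk)
    wk.save()                                   # writes wheel.json back to disk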
|
mit
|
a-tal/pypicloud
|
pypicloud/access/base.py
|
2
|
19245
|
""" The access backend object base class """
from collections import defaultdict
from passlib.apps import custom_app_context as pwd_context
from pyramid.security import (Authenticated, Everyone,
effective_principals, Allow, Deny,
ALL_PERMISSIONS)
from pyramid.settings import aslist
def group_to_principal(group):
""" Convert a group to its corresponding principal """
if group in (Everyone, Authenticated) or group.startswith('group:'):
return group
elif group == 'everyone':
return Everyone
elif group == 'authenticated':
return Authenticated
else:
return 'group:' + group
def groups_to_principals(groups):
""" Convert a list of groups to a list of principals """
return [group_to_principal(g) for g in groups]
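# Illustrative examples (not part of the original module):
#   group_to_principal('everyone')      -> Everyone
#   group_to_principal('authenticated') -> Authenticated
#   group_to_principal('devs')          -> 'group:devs'
#   groups_to_principals(['everyone', 'devs']) -> [Everyone, 'group:devs']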
class IAccessBackend(object):
""" Base class for retrieving user and package permission data """
mutable = False
ROOT_ACL = [
(Allow, Authenticated, 'login'),
(Allow, 'admin', ALL_PERMISSIONS),
(Deny, Everyone, ALL_PERMISSIONS),
]
def __init__(self, request=None, default_read=None, default_write=None,
cache_update=None):
self.request = request
self.default_read = default_read
self.default_write = default_write
self.cache_update = cache_update
@classmethod
def configure(cls, settings):
""" Configure the access backend with app settings """
return {
'default_read': aslist(settings.get('pypi.default_read',
['authenticated'])),
'default_write': aslist(settings.get('pypi.default_write', [])),
'cache_update': aslist(settings.get('pypi.cache_update',
['authenticated'])),
}
def allowed_permissions(self, package):
"""
Get all allowed permissions for all principals on a package
Returns
-------
perms : dict
Mapping of principal to tuple of permissions
"""
all_perms = {}
for user, perms in self.user_permissions(package).iteritems():
all_perms['user:' + user] = tuple(perms)
for group, perms in self.group_permissions(package).iteritems():
all_perms[group_to_principal(group)] = tuple(perms)
# If there are no group or user specifications for the package, use the
# default
if len(all_perms) == 0:
for principal in groups_to_principals(self.default_read):
all_perms[principal] = ('read',)
for principal in groups_to_principals(self.default_write):
if principal in all_perms:
all_perms[principal] += ('write',)
else:
all_perms[principal] = ('write',)
return all_perms
def get_acl(self, package):
""" Construct an ACL for a package """
acl = []
permissions = self.allowed_permissions(package)
for principal, perms in permissions.iteritems():
for perm in perms:
acl.append((Allow, principal, perm))
return acl
def has_permission(self, package, perm):
""" Check if this user has a permission for a package """
current_userid = self.request.userid
if current_userid is not None and self.is_admin(current_userid):
return True
perms = self.allowed_permissions(package)
for principal in effective_principals(self.request):
if perm in perms.get(principal, []):
return True
return False
def user_principals(self, username):
"""
Get a list of principals for a user
Parameters
----------
username : str
Returns
-------
principals : list
"""
principals = ['user:' + username, Everyone, Authenticated]
if self.is_admin(username):
principals.append('admin')
for group in self.groups(username):
principals.append('group:' + group)
return principals
def in_group(self, username, group):
"""
Find out if a user is in a group
Parameters
----------
username : str
Name of user. May be None for the anonymous user.
group : str
Name of the group. Supports 'everyone', 'authenticated', and
'admin'.
Returns
-------
member : bool
"""
if group in ('everyone', Everyone):
return True
elif username is None:
return False
elif group in ('authenticated', Authenticated):
return True
elif group == 'admin' and self.is_admin(username):
return True
else:
return group in self.groups(username)
def in_any_group(self, username, groups):
"""
Find out if a user is in any of a set of groups
Parameters
----------
username : str
Name of user. May be None for the anonymous user.
groups : list
list of group names. Supports 'everyone', 'authenticated', and
'admin'.
Returns
-------
member : bool
"""
return any((self.in_group(username, group) for group in groups))
def can_update_cache(self):
"""
Return True if the user has permissions to update the pypi cache
"""
return self.in_any_group(self.request.userid, self.cache_update)
def need_admin(self):
"""
Find out if there are any admin users
This should only be overridden by mutable backends
Returns
-------
need_admin : bool
True if no admin user exists and the backend is mutable, False
otherwise
"""
return False
def allow_register(self):
"""
Check if the backend allows registration
This should only be overridden by mutable backends
Returns
-------
allow : bool
"""
return False
def verify_user(self, username, password):
"""
Check the login credentials of a user
For Mutable backends, pending users should fail to verify
Parameters
----------
username : str
password : str
Returns
-------
valid : bool
True if user credentials are valid, false otherwise
"""
stored_pw = self._get_password_hash(username)
if self.mutable:
# if a user is pending, user_data will be None
user_data = self.user_data(username)
if user_data is None:
return False
return bool(stored_pw and pwd_context.verify(password, stored_pw))
def _get_password_hash(self, username):
""" Get the stored password hash for a user """
raise NotImplementedError
def groups(self, username=None):
"""
Get a list of all groups
If a username is specified, get all groups that the user belongs to
Parameters
----------
username : str, optional
Returns
-------
groups : list
List of group names
"""
raise NotImplementedError
def group_members(self, group):
"""
Get a list of users that belong to a group
Parameters
----------
group : str
Returns
-------
users : list
List of user names
"""
raise NotImplementedError
def is_admin(self, username):
"""
Check if the user is an admin
Parameters
----------
username : str
Returns
-------
is_admin : bool
"""
raise NotImplementedError
def group_permissions(self, package, group=None):
"""
Get a mapping of all groups to their permissions on a package
If a group is specified, just return the list of permissions for that
group
Parameters
----------
package : str
The name of a python package
group : str, optional
The name of a single group to check
Returns
-------
permissions : dict
If group is None, mapping of group name to a list of permissions
(which can contain 'read' and/or 'write')
permissions : list
If group is not None, a list of permissions for that group
Notes
-----
You may specify special groups 'everyone' and/or 'authenticated', which
correspond to all users and all logged in users respectively.
"""
raise NotImplementedError
def user_permissions(self, package, username=None):
"""
Get a mapping of all users to their permissions for a package
If a username is specified, just return the list of permissions for
that user
Parameters
----------
package : str
The name of a python package
username : str
The name of a single user to check
Returns
-------
permissions : dict
Mapping of username to a list of permissions (which can contain
'read' and/or 'write')
permissions : list
If username is not None, a list of permissions for that user
"""
raise NotImplementedError
def user_package_permissions(self, username):
"""
Get a list of all packages that a user has permissions on
Parameters
----------
username : str
Returns
-------
packages : list
List of dicts. Each dict contains 'package' (str) and 'permissions'
(list)
"""
raise NotImplementedError
def group_package_permissions(self, group):
"""
Get a list of all packages that a group has permissions on
Parameters
----------
group : str
Returns
-------
packages : list
List of dicts. Each dict contains 'package' (str) and 'permissions'
(list)
"""
raise NotImplementedError
def user_data(self, username=None):
"""
Get a list of all users or data for a single user
For Mutable backends, this MUST exclude all pending users
Returns
-------
users : list
Each user is a dict with a 'username' str, and 'admin' bool
user : dict
If a username is passed in, instead return one user with the fields
above plus a 'groups' list.
"""
raise NotImplementedError
def dump(self):
"""
Dump all of the access control data to a universal format
Returns
-------
data : dict
"""
from pypicloud import __version__
data = {}
data['allow_register'] = self.allow_register()
data['version'] = __version__
groups = self.groups()
users = self.user_data()
for user in users:
user['password'] = self._get_password_hash(user['username'])
data['groups'] = {}
packages = {
'users': defaultdict(dict),
'groups': defaultdict(dict),
}
for group in groups:
data['groups'][group] = self.group_members(group)
perms = self.group_package_permissions(group)
for perm in perms:
package = perm['package']
packages['groups'][package][group] = perm['permissions']
for user in users:
username = user['username']
perms = self.user_package_permissions(username)
for perm in perms:
package = perm['package']
packages['users'][package][username] = perm['permissions']
# Convert the defaultdict to a dict for easy serialization
packages['users'] = dict(packages['users'])
packages['groups'] = dict(packages['groups'])
data['users'] = users
data['packages'] = packages
return data
def load(self, data):
"""
Idempotently load universal access control data.
On immutable backends this raises a TypeError by default; backends may
override this method to provide an implementation.
Mutable backends inherit a working implementation from
IMutableAccessBackend, so no override is necessary there.
"""
raise TypeError("Access backend '%s' is not mutable and has no "
"'load' implementation" % self.__class__.__name__)
class IMutableAccessBackend(IAccessBackend):
"""
Base class for access backends that can change user/group permissions
"""
mutable = True
def need_admin(self):
for user in self.user_data():
if user['admin']:
return False
return True
def allow_register(self):
raise NotImplementedError
def set_allow_register(self, allow):
"""
Allow or disallow user registration
Parameters
----------
allow : bool
"""
raise NotImplementedError
def register(self, username, password):
"""
Register a new user
The new user should be marked as pending admin approval
Parameters
----------
username : str
password : str
This should be the plaintext password
"""
if self.allow_register():
self._register(username, pwd_context.encrypt(password))
def _register(self, username, password):
"""
Register a new user
The new user should be marked as pending admin approval
Parameters
----------
username : str
password : str
This will be the hash of the password
"""
raise NotImplementedError
def pending_users(self):
"""
Retrieve a list of all users pending admin approval
Returns
-------
users : list
List of usernames
"""
raise NotImplementedError
def approve_user(self, username):
"""
Mark a user as approved by the admin
Parameters
----------
username : str
"""
raise NotImplementedError
def edit_user_password(self, username, password):
"""
Change a user's password
Parameters
----------
username : str
password : str
"""
self._set_password_hash(username, pwd_context.encrypt(password))
def _set_password_hash(self, username, password_hash):
"""
Change a user's password
Parameters
----------
username : str
password_hash : str
The hashed password to store
"""
raise NotImplementedError
def delete_user(self, username):
"""
Delete a user
Parameters
----------
username : str
"""
raise NotImplementedError
def set_user_admin(self, username, admin):
"""
Grant or revoke admin permissions for a user
Parameters
----------
username : str
admin : bool
If True, grant permissions. If False, revoke.
"""
raise NotImplementedError
def edit_user_group(self, username, group, add):
"""
Add or remove a user to/from a group
Parameters
----------
username : str
group : str
add : bool
If True, add to group. If False, remove.
"""
raise NotImplementedError
def create_group(self, group):
"""
Create a new group
Parameters
----------
group : str
"""
raise NotImplementedError
def delete_group(self, group):
"""
Delete a group
Parameters
----------
group : str
"""
raise NotImplementedError
def edit_user_permission(self, package, username, perm, add):
"""
Grant or revoke a permission for a user on a package
Parameters
----------
package : str
username : str
perm : {'read', 'write'}
add : bool
If True, grant permissions. If False, revoke.
"""
raise NotImplementedError
def edit_group_permission(self, package, group, perm, add):
"""
Grant or revoke a permission for a group on a package
Parameters
----------
package : str
group : str
perm : {'read', 'write'}
add : bool
If True, grant permissions. If False, revoke.
"""
raise NotImplementedError
def dump(self):
data = super(IMutableAccessBackend, self).dump()
pending_users = []
for username in self.pending_users(): # pylint: disable=E1101
password = self._get_password_hash(username)
pending_users.append({
'username': username,
'password': password,
})
data['pending_users'] = pending_users
return data
def load(self, data):
# Have to temporarily set this as True for the load operation
self.set_allow_register(True)
pending_users = set(self.pending_users())
def user_exists(username):
""" Helper function that checks if a user already exists """
return (username in pending_users or
self.user_data(username) is not None)
for user in data['users']:
if not user_exists(user['username']):
self._register(user['username'], user['password'])
self.approve_user(user['username'])
self.set_user_admin(user['username'], user.get('admin', False))
for group, members in data['groups'].iteritems():
if len(self.group_members(group)) == 0:
self.create_group(group)
current_members = self.group_members(group)
add_members = set(members) - set(current_members)
for member in add_members:
self.edit_user_group(member, group, True)
for user in data.get('pending_users', []):
if not user_exists(user['username']):
self._register(user['username'], user['password'])
for package, groups in data['packages']['groups'].iteritems():
for group, permissions in groups.iteritems():
for perm in permissions:
self.edit_group_permission(package, group, perm, True)
for package, users in data['packages']['users'].iteritems():
for user, permissions in users.iteritems():
for perm in permissions:
self.edit_user_permission(package, user, perm, True)
self.set_allow_register(data['allow_register'])
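# ---------------------------------------------------------------------------
# Illustrative sketch (not part of pypicloud): a minimal read-only backend
# backed by in-memory dicts. It only fills in the abstract methods needed by
# allowed_permissions()/get_acl(); the remaining methods keep the base class
# NotImplementedError. All names and data below are hypothetical.
# ---------------------------------------------------------------------------
class DictAccessBackend(IAccessBackend):
    """ Toy backend that answers permission queries from plain dicts """
    _admins = set(['alice'])
    _group_members = {'devs': ['alice', 'bob']}
    _perms = {
        'mypkg': {
            'users': {'alice': ['read', 'write']},
            'groups': {'devs': ['read']},
        },
    }
    def is_admin(self, username):
        return username in self._admins
    def groups(self, username=None):
        if username is None:
            return list(self._group_members)
        return [g for g, members in self._group_members.items()
                if username in members]
    def group_permissions(self, package, group=None):
        perms = self._perms.get(package, {}).get('groups', {})
        return perms if group is None else perms.get(group, [])
    def user_permissions(self, package, username=None):
        perms = self._perms.get(package, {}).get('users', {})
        return perms if username is None else perms.get(username, [])
# For example, DictAccessBackend(default_read=['everyone'], default_write=[])
# .get_acl('mypkg') yields Allow entries for 'user:alice' (read, write) and
# 'group:devs' (read); a package with no explicit permissions falls back to
# the 'everyone' read default instead.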
|
mit
|
DavidAndreev/indico
|
migrations/versions/201604211656_258db7e5a3e5_sync_contribution_abstract_friendly_ids.py
|
2
|
4927
|
"""Sync contribution/abstract friendly ids
Revision ID: 258db7e5a3e5
Revises: 3ca8e62e6c36
Create Date: 2016-04-21 16:56:20.113767
"""
import sqlalchemy as sa
from alembic import context, op
# revision identifiers, used by Alembic.
revision = '258db7e5a3e5'
down_revision = '3ca8e62e6c36'
def _sync_last_contrib_id():
op.execute("""
UPDATE events.events e
SET last_friendly_contribution_id = greatest(
last_friendly_contribution_id,
(SELECT MAX(c.friendly_id) FROM events.contributions c WHERE c.event_id = e.id),
(SELECT MAX(a.friendly_id) FROM event_abstracts.abstracts a WHERE a.event_id = e.id)
)
WHERE
e.id IN (SELECT DISTINCT event_id FROM event_abstracts.abstracts) AND
last_friendly_contribution_id != greatest(
last_friendly_contribution_id,
(SELECT MAX(c.friendly_id) FROM events.contributions c WHERE c.event_id = e.id),
(SELECT MAX(a.friendly_id) FROM event_abstracts.abstracts a WHERE a.event_id = e.id)
)
""")
def _get_next_friendly_id(conn, event_id):
cur = conn.execute("""
UPDATE events.events
SET last_friendly_contribution_id = last_friendly_contribution_id + 1
WHERE events.events.id = %s
RETURNING last_friendly_contribution_id
""", (event_id,))
return cur.fetchone()[0]
def upgrade():
if context.is_offline_mode():
raise Exception('This upgrade is only possible in online mode')
conn = op.get_bind()
# Remove the trigger and unique index since we need to modify events
# with inconsistencies and also will have friendly_id collisions
# temporarily
op.execute("DROP TRIGGER consistent_timetable ON events.events")
op.drop_index('ix_uq_contributions_friendly_id_event_id', table_name='contributions', schema='events')
# Sync the friendly ID of contributions with that of their abstract
op.execute("""
UPDATE events.contributions c
SET friendly_id = (
SELECT friendly_id FROM event_abstracts.abstracts a WHERE a.id = c.abstract_id
)
WHERE
c.abstract_id IS NOT NULL AND
c.friendly_id != (SELECT friendly_id FROM event_abstracts.abstracts a WHERE a.id = c.abstract_id) AND
c.event_id IN (SELECT DISTINCT event_id FROM event_abstracts.abstracts)
""")
# Synchronize the friendly_id sequences so new contributions/abstracts can be added
_sync_last_contrib_id()
# Find contributions which now have friendly_id collisions and assign new ones
query = """
SELECT c.id, c.event_id
FROM events.contributions c
WHERE
NOT c.is_deleted AND
EXISTS (
SELECT 1
FROM events.contributions c2
WHERE
c2.event_id = c.event_id AND
c2.friendly_id = c.friendly_id AND
c2.id != c.id AND
c.abstract_id IS NULL
) AND
c.event_id IN (SELECT DISTINCT event_id FROM event_abstracts.abstracts)
"""
for contrib_id, event_id in conn.execute(query):
friendly_id = _get_next_friendly_id(conn, event_id)
print 'Updating friendly contribution ID to avoid collision', event_id, contrib_id, friendly_id
conn.execute("UPDATE events.contributions SET friendly_id = %s WHERE id = %s", (friendly_id, contrib_id))
# Assign new friendly IDs to contributions with no abstract that have friendly IDs colliding with abstracts
query = """
SELECT c.id, c.event_id
FROM events.contributions c
WHERE
EXISTS (
SELECT 1
FROM event_abstracts.abstracts a
WHERE
a.event_id = c.event_id AND
a.friendly_id = c.friendly_id AND
(c.abstract_id != a.id OR c.abstract_id IS NULL)
)
"""
for contrib_id, event_id in conn.execute(query):
friendly_id = _get_next_friendly_id(conn, event_id)
print 'Updating friendly contribution ID to avoid future collision', event_id, contrib_id, friendly_id
conn.execute("UPDATE events.contributions SET friendly_id = %s WHERE id = %s", (friendly_id, contrib_id))
# The sequences should still be in sync but re-sync them just in case
_sync_last_contrib_id()
# Restore the index and triggers
op.create_index(None, 'contributions', ['friendly_id', 'event_id'], unique=True,
postgresql_where=sa.text('NOT is_deleted'), schema='events')
op.execute("""
CREATE CONSTRAINT TRIGGER consistent_timetable
AFTER UPDATE
ON events.events
DEFERRABLE INITIALLY DEFERRED
FOR EACH ROW
EXECUTE PROCEDURE events.check_timetable_consistency('event');
""")
def downgrade():
pass
|
gpl-3.0
|
pravsripad/jumeg
|
pipelines/chop_and_apply_ica.py
|
3
|
18052
|
import os.path as op
import numpy as np
from utils import set_directory
import mne
from jumeg.decompose.ica_replace_mean_std import ICA, read_ica, apply_ica_replace_mean_std
from jumeg.jumeg_preprocessing import get_ics_cardiac, get_ics_ocular
from jumeg.jumeg_plot import plot_performance_artifact_rejection # , plot_artefact_overview
def determine_chop_times_every_x_s(total_time, chop_length=60.):
"""
Chop every X s where X=interval. If the last chop would have a length
under X it is combined with the penultimate chop.
Parameters
----------
total_time : float
Total length of the recording.
chop_length : float
Length of a chop.
Returns
-------
chop_times : list of float
Time points for when to chop the raw file
"""
chop_times = []
chop = 0.
while total_time >= chop + 2 * chop_length:
chop += chop_length
chop_times.append(chop)
return chop_times
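# Illustrative example (hypothetical values): for a 250 s recording and
# chop_length=60. this returns [60., 120., 180.], i.e. chops covering
# 0-60 s, 60-120 s, 120-180 s and a final 180-250 s chop that absorbs the
# remainder, so the last piece is never shorter than chop_length.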
def get_tmin_tmax(ct_idx, chop_times, sfreq):
"""
Get tmin and tmax for the chop interval based on the
time points given by chop_times.
Parameters:
-----------
ct_idx : int
Index corresponding to chop_times.
chop_times : list of float
List with the time points of when to chop the data.
sfreq : float
Sampling frequency of the measurement data.
Returns:
--------
tmin : int
Starting time of the chop interval in s.
tmax : int
Ending time of the chop interval in s.
"""
if ct_idx == 0:
tmin = 0
tmax = chop_times[ct_idx] - 1. / sfreq
print(int(tmin), int(tmax))
elif ct_idx == len(chop_times):
tmin = chop_times[ct_idx - 1]
tmax = None
print(int(tmin), "None")
else:
tmin = chop_times[ct_idx - 1]
tmax = chop_times[ct_idx] - 1. / sfreq
print(int(tmin), int(tmax))
return tmin, tmax
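# Illustrative example (hypothetical values): with chop_times=[60., 120., 180.]
# and sfreq=1000., ct_idx=0 yields (0, 59.999), ct_idx=1 yields (60., 119.999)
# and ct_idx=3 (== len(chop_times)) yields (180., None) for the final chop.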
def apply_ica_and_plot_performance(raw, ica, name_ecg, name_eog, raw_fname, clean_fname, picks=None,
reject=None, replace_pre_whitener=True, save=False):
"""
Applies ICA to the raw object and plots the performance of rejecting ECG and EOG artifacts.
Parameters
----------
raw : mne.io.Raw()
Raw object ICA is applied to
ica : ICA object
ICA object being applied d to the raw object
name_ecg : str
Name of the ECG channel in the raw data
name_eog : str
Name of the (vertical) EOG channel in the raw data
raw_fname : str | None
Path for saving the raw object
clean_fname : str | None
Path for saving the ICA cleaned raw object
picks : array-like of int | None
Channels to be included for the calculation of pca_mean_ and _pre_whitener.
This selection SHOULD BE THE SAME AS the one used in ica.fit().
reject : dict | None
Rejection parameters based on peak-to-peak amplitude. This parameter SHOULD BE
THE SAME AS the one used in ica.fit().
Valid keys are 'grad', 'mag', 'eeg', 'seeg', 'ecog', 'eog', 'ecg',
'hbo', 'hbr'.
If reject is None then no rejection is done. Example::
reject = dict(grad=4000e-13, # T / m (gradiometers)
mag=4e-12, # T (magnetometers)
eeg=40e-6, # V (EEG channels)
eog=250e-6 # V (EOG channels)
)
It only applies if `raw` is of type Raw.
replace_pre_whitener : bool
If True, pre_whitener is replaced when applying ICA to
unfiltered data otherwise the original pre_whitener is used.
save : bool
Save the raw object and cleaned raw object
Returns
-------
raw_clean : mne.io.Raw()
Raw object after ICA cleaning
"""
# apply_ica_replace_mean_std processes in place -> need copy to plot performance
raw_copy = raw.copy()
ica = ica.copy()
raw_clean = apply_ica_replace_mean_std(raw, ica, picks=picks, reject=reject,
exclude=ica.exclude, n_pca_components=None,
replace_pre_whitener=replace_pre_whitener)
if save:
if raw_fname is not None:
raw_copy.save(raw_fname, overwrite=True)
raw_clean.save(clean_fname, overwrite=True)
overview_fname = clean_fname.rsplit('-raw.fif')[0] + ',overview-plot'
plot_performance_artifact_rejection(raw_copy, ica, overview_fname,
meg_clean=raw_clean,
show=False, verbose=False,
name_ecg=name_ecg,
name_eog=name_eog)
print('Saved ', overview_fname)
raw_copy.close()
return raw_clean
def fit_ica(raw, picks, reject, ecg_ch, eog_hor, eog_ver,
flow_ecg, fhigh_ecg, flow_eog, fhigh_eog, ecg_thresh,
eog_thresh, use_jumeg=True, random_state=42):
"""
Fit an ICA object to the raw file. Identify cardiac and ocular components
and mark them for removal.
Parameters:
-----------
raw : instance of Raw
Raw measurements to be decomposed.
picks : array-like of int
Channels to be included. This selection remains throughout the
initialized ICA solution. If None only good data channels are used.
reject : dict | None
Rejection parameters based on peak-to-peak amplitude.
Valid keys are 'grad', 'mag', 'eeg', 'seeg', 'ecog', 'eog', 'ecg',
'hbo', 'hbr'.
If reject is None then no rejection is done. Example::
reject = dict(grad=4000e-13, # T / m (gradiometers)
mag=4e-12, # T (magnetometers)
eeg=40e-6, # V (EEG channels)
eog=250e-6 # V (EOG channels)
)
It only applies if `raw` is of type Raw.
ecg_ch : array-like | ch_name | None
ECG channel to which the sources shall be compared. It has to be
of the same shape as the sources. If some string is supplied, a
routine will try to find a matching channel. If None, a score
function expecting only one input-array argument must be used,
for instance, scipy.stats.skew (default).
eog_hor : array-like | ch_name | None
Horizontal EOG channel to which the sources shall be compared.
It has to be of the same shape as the sources. If some string
is supplied, a routine will try to find a matching channel. If
None, a score function expecting only one input-array argument
must be used, for instance, scipy.stats.skew (default).
eog_ver : array-like | ch_name | None
Vertical EOG channel to which the sources shall be compared.
It has to be of the same shape as the sources. If some string
is supplied, a routine will try to find a matching channel. If
None, a score function expecting only one input-array argument
must be used, for instance, scipy.stats.skew (default).
flow_ecg : float
Low pass frequency for ECG component identification.
fhigh_ecg : float
High pass frequency for ECG component identification.
flow_eog : float
Low pass frequency for EOG component identification.
fhigh_eog : float
High pass frequency for EOG component identification.
ecg_thresh : float
Threshold for ECG component identification.
eog_thresh : float
Threshold for EOG component identification.
use_jumeg : bool
Use the JuMEG scoring method for the identification of
artifact components.
random_state : None | int | instance of np.random.RandomState
np.random.RandomState to initialize the FastICA estimation.
As the estimation is non-deterministic it can be useful to
fix the seed to have reproducible results. Defaults to None.
Returns:
--------
ica : mne.preprocessing.ICA
ICA object for raw file with ECG and EOG components marked for removal.
"""
# increased iteration to make it converge
# fix the number of components to 40, depending on your application you
# might want to raise the number
# 'extended-infomax', 'fastica', 'picard'
ica = ICA(method='fastica', n_components=40, random_state=random_state,
max_pca_components=None, max_iter=5000, verbose=False)
ica.fit(raw, picks=picks, decim=None, reject=reject, verbose=True)
#######################################################################
# identify bad components
#######################################################################
# get ECG and EOG related components using MNE
print('Computing scores and identifying components..')
if use_jumeg:
# get ECG/EOG related components using JuMEG
ic_ecg = get_ics_cardiac(raw, ica, flow=flow_ecg, fhigh=fhigh_ecg,
thresh=ecg_thresh, tmin=-0.5, tmax=0.5, name_ecg=ecg_ch,
use_CTPS=True)[0]
ic_eog = get_ics_ocular(raw, ica, flow=flow_eog, fhigh=fhigh_eog,
thresh=eog_thresh, name_eog_hor=eog_hor, name_eog_ver=eog_ver,
score_func='pearsonr')
ic_ecg = list(set(ic_ecg))
ic_eog = list(set(ic_eog))
ic_ecg.sort()
ic_eog.sort()
# if necessary include components identified by correlation as well
bads_list = list(set(list(ic_ecg) + list(ic_eog)))
bads_list.sort()
ica.exclude = bads_list
print('Identified ECG components are: ', ic_ecg)
print('Identified EOG components are: ', ic_eog)
else:
ecg_scores = ica.score_sources(raw, target=ecg_ch, score_func='pearsonr',
l_freq=flow_ecg, h_freq=fhigh_ecg, verbose=False)
# horizontal channel
eog1_scores = ica.score_sources(raw, target=eog_hor, score_func='pearsonr',
l_freq=flow_eog, h_freq=fhigh_eog, verbose=False)
# vertical channel
eog2_scores = ica.score_sources(raw, target=eog_ver, score_func='pearsonr',
l_freq=flow_eog, h_freq=fhigh_eog, verbose=False)
# print the top ecg, eog correlation scores
ecg_inds = np.where(np.abs(ecg_scores) > ecg_thresh)[0]
eog1_inds = np.where(np.abs(eog1_scores) > eog_thresh)[0]
eog2_inds = np.where(np.abs(eog2_scores) > eog_thresh)[0]
highly_corr = list(set(np.concatenate((ecg_inds, eog1_inds, eog2_inds))))
highly_corr.sort()
highly_corr_ecg = list(set(ecg_inds))
highly_corr_eog1 = list(set(eog1_inds))
highly_corr_eog2 = list(set(eog2_inds))
highly_corr_ecg.sort()
highly_corr_eog1.sort()
highly_corr_eog2.sort()
print('Highly correlated artifact components are:')
print(' ECG: ', highly_corr_ecg)
print(' EOG 1:', highly_corr_eog1)
print(' EOG 2:', highly_corr_eog2)
# if necessary include components identified by correlation as well
ica.exclude = highly_corr
print("Plot ica sources to remove jumpy component for channels 4, 6, 8, 22")
return ica
def chop_and_apply_ica(raw_filt_fname, ica_cfg):
"""
Read raw file, chop it into smaller segments and apply ica on the
chops. Save the ICA objects plus cleaned raw chops. Plot overview
of the artifact rejection.
Parameters:
-----------
raw_filt_fname : str
The filtered raw file to clean.
ica_cfg : dict
Dict containing the ica specific settings from the config file.
Returns:
--------
clean_filtered : mne.io.Raw instance
Cleaned, filtered raw object.
clean_unfiltered : mne.io.Raw instance or None
Cleaned, unfiltered raw object or None if ica is not to be
applied on unfiltered data.
"""
raw_chop_clean_filtered_list = []
raw_chop_clean_unfiltered_list = []
print('Running chop_and_apply_ica on ', raw_filt_fname)
###########################################################################
# load settings from ica config
###########################################################################
chop_length = ica_cfg['chop_length']
ecg_ch = ica_cfg['ecg_ch']
eog_hor = ica_cfg['eog_hor_ch']
eog_ver = ica_cfg['eog_ver_ch']
flow_ecg = ica_cfg['flow_ecg']
fhigh_ecg = ica_cfg['fhigh_ecg']
flow_eog = ica_cfg['flow_eog']
fhigh_eog = ica_cfg['fhigh_eog']
ecg_thresh = ica_cfg['ecg_thresh']
eog_thresh = ica_cfg['eog_thresh']
use_jumeg = ica_cfg['use_jumeg']
random_state = ica_cfg['random_state']
unfiltered = ica_cfg['unfiltered']
reject = ica_cfg['reject']
exclude = ica_cfg['exclude']
save = ica_cfg['save']
# start cleaning
raw_filt = mne.io.Raw(raw_filt_fname, preload=True, verbose=True)
if unfiltered:
raw_unfilt_fname = raw_filt_fname.replace(',fibp', '')
raw_unfilt = mne.io.Raw(raw_unfilt_fname, preload=True, verbose=True)
picks = mne.pick_types(raw_filt.info, meg=True, exclude=exclude)
# you might want to determine the chop time in a more sophisticated way
# to avoid accidentally chopping in the middle of a trial
chop_times = determine_chop_times_every_x_s(raw_filt.n_times / raw_filt.info["sfreq"],
chop_length=chop_length)
# chop the data and apply filtering
# avoid double counting of data point at chop: tmax = chop_times[i] - 1./raw.info["sfreq"]
for i in range(0, len(chop_times) + 1):
# get chop interval
tmin, tmax = get_tmin_tmax(ct_idx=i, chop_times=chop_times,
sfreq=raw_filt.info["sfreq"])
#######################################################################
# building the file names here
#######################################################################
info_filt = "fibp"
if tmax is not None:
tmaxi = int(tmax)
else:
tmaxi = tmax
dirname = op.join(op.dirname(raw_filt_fname), 'chops')
set_directory(dirname)
prefix_filt = raw_filt_fname.rsplit('/')[-1].rsplit('-raw.fif')[0]
ica_fname = op.join(dirname, prefix_filt + ',{}-{}-ica.fif'.format(int(tmin), tmaxi))
# make sure to copy because the original is lost
raw_filt_chop = raw_filt.copy().crop(tmin=tmin, tmax=tmax)
clean_filt_fname = op.join(dirname, prefix_filt + ',{},ar,{}-{}-raw.fif'.format(info_filt, int(tmin), tmaxi))
raw_filt_chop_fname = op.join(dirname, prefix_filt + ',{},{}-{}-raw.fif'.format(info_filt, int(tmin), tmaxi))
if unfiltered:
prefix_unfilt = prefix_filt.replace(',fibp', '')
raw_unfilt_chop = raw_unfilt.copy().crop(tmin=tmin, tmax=tmax)
clean_unfilt_fname = op.join(dirname, prefix_unfilt + ',ar,{}-{}-raw.fif'.format(int(tmin), tmaxi))
raw_unfilt_chop_fname = op.join(dirname, prefix_unfilt + ',{}-{}-raw.fif'.format(int(tmin), tmaxi))
#######################################################################
# run the ICA on the chops
#######################################################################
print('Starting ICA...')
if op.isfile(ica_fname):
ica = read_ica(ica_fname)
else:
ica = fit_ica(raw=raw_filt_chop, picks=picks, reject=reject,
ecg_ch=ecg_ch, eog_hor=eog_hor, eog_ver=eog_ver,
flow_ecg=flow_ecg, fhigh_ecg=fhigh_ecg,
flow_eog=flow_eog, fhigh_eog=fhigh_eog,
ecg_thresh=ecg_thresh, eog_thresh=eog_thresh,
use_jumeg=use_jumeg, random_state=random_state)
# plot topo-plots first because sometimes components are hard to identify
# ica.plot_components()
# do the most important manual check
ica.plot_sources(raw_filt_chop, block=True)
# save ica object
ica.save(ica_fname)
print('ICA components excluded: ', ica.exclude)
#######################################################################
# apply the ICA to data and save the resulting files
#######################################################################
print('Running cleaning on filtered data...')
clean_filt_chop = apply_ica_and_plot_performance(raw_filt_chop, ica, ecg_ch, eog_ver,
raw_filt_chop_fname, clean_fname=clean_filt_fname,
picks=picks, replace_pre_whitener=True,
reject=reject, save=save)
raw_chop_clean_filtered_list.append(clean_filt_chop)
if unfiltered:
print('Running cleaning on unfiltered data...')
clean_unfilt_chop = apply_ica_and_plot_performance(raw_unfilt_chop, ica, ecg_ch, eog_ver,
raw_unfilt_chop_fname, clean_fname=clean_unfilt_fname,
picks=picks, replace_pre_whitener=True,
reject=reject, save=save)
raw_chop_clean_unfiltered_list.append(clean_unfilt_chop)
# if tmax is None, last chop is reached
if tmax is None:
break
clean_filt_concat = mne.concatenate_raws(raw_chop_clean_filtered_list)
if unfiltered:
clean_unfilt_concat = mne.concatenate_raws(raw_chop_clean_unfiltered_list)
else:
clean_unfilt_concat = None
return clean_filt_concat, clean_unfilt_concat
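# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the pipeline). The file name and all
# config values below are hypothetical; they would normally come from the
# project's config file.
# ---------------------------------------------------------------------------
def _example_run():
    ica_cfg = dict(
        chop_length=60.,                   # chop the raw data into 60 s pieces
        ecg_ch='ECG 001',                  # ECG channel name in the raw data
        eog_hor_ch='EOG 001',              # horizontal EOG channel
        eog_ver_ch='EOG 002',              # vertical EOG channel
        flow_ecg=8., fhigh_ecg=20.,        # band for ECG component scoring
        flow_eog=1., fhigh_eog=20.,        # band for EOG component scoring
        ecg_thresh=0.25, eog_thresh=0.25,  # identification thresholds
        use_jumeg=True,                    # use the JuMEG scoring method
        random_state=42,
        unfiltered=False,                  # True also cleans the unfiltered raw
        reject=dict(mag=5e-12),            # peak-to-peak rejection for ica.fit
        exclude='bads',                    # channels excluded from the picks
        save=True,
    )
    clean_filt, clean_unfilt = chop_and_apply_ica('subject01,fibp1-45-raw.fif',
                                                  ica_cfg)
    return clean_filt, clean_unfilt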
|
bsd-3-clause
|
patdoyle1/FastMath
|
lib/python2.7/site-packages/pip/_vendor/distlib/_backport/sysconfig.py
|
168
|
26964
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
"""Access to Python's configuration information."""
import codecs
import os
import re
import sys
from os.path import pardir, realpath
try:
import configparser
except ImportError:
import ConfigParser as configparser
__all__ = [
'get_config_h_filename',
'get_config_var',
'get_config_vars',
'get_makefile_filename',
'get_path',
'get_path_names',
'get_paths',
'get_platform',
'get_python_version',
'get_scheme_names',
'parse_config_h',
]
def _safe_realpath(path):
try:
return realpath(path)
except OSError:
return path
if sys.executable:
_PROJECT_BASE = os.path.dirname(_safe_realpath(sys.executable))
else:
# sys.executable can be empty if argv[0] has been changed and Python is
# unable to retrieve the real program name
_PROJECT_BASE = _safe_realpath(os.getcwd())
if os.name == "nt" and "pcbuild" in _PROJECT_BASE[-8:].lower():
_PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir))
# PC/VS7.1
if os.name == "nt" and "\\pc\\v" in _PROJECT_BASE[-10:].lower():
_PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir, pardir))
# PC/AMD64
if os.name == "nt" and "\\pcbuild\\amd64" in _PROJECT_BASE[-14:].lower():
_PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir, pardir))
def is_python_build():
for fn in ("Setup.dist", "Setup.local"):
if os.path.isfile(os.path.join(_PROJECT_BASE, "Modules", fn)):
return True
return False
_PYTHON_BUILD = is_python_build()
_cfg_read = False
def _ensure_cfg_read():
global _cfg_read
if not _cfg_read:
from distlib.resources import finder
backport_package = __name__.rsplit('.', 1)[0]
_finder = finder(backport_package)
_cfgfile = _finder.find('sysconfig.cfg')
assert _cfgfile, 'sysconfig.cfg exists'
with _cfgfile.as_stream() as s:
_SCHEMES.readfp(s)
if _PYTHON_BUILD:
for scheme in ('posix_prefix', 'posix_home'):
_SCHEMES.set(scheme, 'include', '{srcdir}/Include')
_SCHEMES.set(scheme, 'platinclude', '{projectbase}/.')
_cfg_read = True
_SCHEMES = configparser.RawConfigParser()
_VAR_REPL = re.compile(r'\{([^{]*?)\}')
def _expand_globals(config):
_ensure_cfg_read()
if config.has_section('globals'):
globals = config.items('globals')
else:
globals = tuple()
sections = config.sections()
for section in sections:
if section == 'globals':
continue
for option, value in globals:
if config.has_option(section, option):
continue
config.set(section, option, value)
config.remove_section('globals')
# now expanding local variables defined in the cfg file
#
for section in config.sections():
variables = dict(config.items(section))
def _replacer(matchobj):
name = matchobj.group(1)
if name in variables:
return variables[name]
return matchobj.group(0)
for option, value in config.items(section):
config.set(section, option, _VAR_REPL.sub(_replacer, value))
#_expand_globals(_SCHEMES)
# FIXME don't rely on sys.version here, its format is an implementation detail
# of CPython, use sys.version_info or sys.hexversion
_PY_VERSION = sys.version.split()[0]
_PY_VERSION_SHORT = sys.version[:3]
_PY_VERSION_SHORT_NO_DOT = _PY_VERSION[0] + _PY_VERSION[2]
_PREFIX = os.path.normpath(sys.prefix)
_EXEC_PREFIX = os.path.normpath(sys.exec_prefix)
_CONFIG_VARS = None
_USER_BASE = None
def _subst_vars(path, local_vars):
"""In the string `path`, replace tokens like {some.thing} with the
corresponding value from the map `local_vars`.
If there is no corresponding value, leave the token unchanged.
"""
def _replacer(matchobj):
name = matchobj.group(1)
if name in local_vars:
return local_vars[name]
elif name in os.environ:
return os.environ[name]
return matchobj.group(0)
return _VAR_REPL.sub(_replacer, path)
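# Illustrative example (hypothetical values, assuming 'unknown' is not set in
# the environment):
#   _subst_vars('{userbase}/include', {'userbase': '/home/me/.local'})
#       -> '/home/me/.local/include'
#   _subst_vars('{unknown}/include', {}) -> '{unknown}/include'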
def _extend_dict(target_dict, other_dict):
target_keys = target_dict.keys()
for key, value in other_dict.items():
if key in target_keys:
continue
target_dict[key] = value
def _expand_vars(scheme, vars):
res = {}
if vars is None:
vars = {}
_extend_dict(vars, get_config_vars())
for key, value in _SCHEMES.items(scheme):
if os.name in ('posix', 'nt'):
value = os.path.expanduser(value)
res[key] = os.path.normpath(_subst_vars(value, vars))
return res
def format_value(value, vars):
def _replacer(matchobj):
name = matchobj.group(1)
if name in vars:
return vars[name]
return matchobj.group(0)
return _VAR_REPL.sub(_replacer, value)
def _get_default_scheme():
if os.name == 'posix':
# the default scheme for posix is posix_prefix
return 'posix_prefix'
return os.name
def _getuserbase():
env_base = os.environ.get("PYTHONUSERBASE", None)
def joinuser(*args):
return os.path.expanduser(os.path.join(*args))
# what about 'os2emx', 'riscos' ?
if os.name == "nt":
base = os.environ.get("APPDATA") or "~"
if env_base:
return env_base
else:
return joinuser(base, "Python")
if sys.platform == "darwin":
framework = get_config_var("PYTHONFRAMEWORK")
if framework:
if env_base:
return env_base
else:
return joinuser("~", "Library", framework, "%d.%d" %
sys.version_info[:2])
if env_base:
return env_base
else:
return joinuser("~", ".local")
def _parse_makefile(filename, vars=None):
"""Parse a Makefile-style file.
A dictionary containing name/value pairs is returned. If an
optional dictionary is passed in as the second argument, it is
used instead of a new dictionary.
"""
# Regexes needed for parsing Makefile (and similar syntaxes,
# like old-style Setup files).
_variable_rx = re.compile("([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)")
_findvar1_rx = re.compile(r"\$\(([A-Za-z][A-Za-z0-9_]*)\)")
_findvar2_rx = re.compile(r"\${([A-Za-z][A-Za-z0-9_]*)}")
if vars is None:
vars = {}
done = {}
notdone = {}
with codecs.open(filename, encoding='utf-8', errors="surrogateescape") as f:
lines = f.readlines()
for line in lines:
if line.startswith('#') or line.strip() == '':
continue
m = _variable_rx.match(line)
if m:
n, v = m.group(1, 2)
v = v.strip()
# `$$' is a literal `$' in make
tmpv = v.replace('$$', '')
if "$" in tmpv:
notdone[n] = v
else:
try:
v = int(v)
except ValueError:
# insert literal `$'
done[n] = v.replace('$$', '$')
else:
done[n] = v
# do variable interpolation here
variables = list(notdone.keys())
# Variables with a 'PY_' prefix in the makefile. These need to
# be made available without that prefix through sysconfig.
# Special care is needed to ensure that variable expansion works, even
# if the expansion uses the name without a prefix.
renamed_variables = ('CFLAGS', 'LDFLAGS', 'CPPFLAGS')
while len(variables) > 0:
for name in tuple(variables):
value = notdone[name]
m = _findvar1_rx.search(value) or _findvar2_rx.search(value)
if m is not None:
n = m.group(1)
found = True
if n in done:
item = str(done[n])
elif n in notdone:
# get it on a subsequent round
found = False
elif n in os.environ:
# do it like make: fall back to environment
item = os.environ[n]
elif n in renamed_variables:
if (name.startswith('PY_') and
name[3:] in renamed_variables):
item = ""
elif 'PY_' + n in notdone:
found = False
else:
item = str(done['PY_' + n])
else:
done[n] = item = ""
if found:
after = value[m.end():]
value = value[:m.start()] + item + after
if "$" in after:
notdone[name] = value
else:
try:
value = int(value)
except ValueError:
done[name] = value.strip()
else:
done[name] = value
variables.remove(name)
if (name.startswith('PY_') and
name[3:] in renamed_variables):
name = name[3:]
if name not in done:
done[name] = value
else:
# bogus variable reference (e.g. "prefix=$/opt/python");
# just drop it since we can't deal
done[name] = value
variables.remove(name)
# strip spurious spaces
for k, v in done.items():
if isinstance(v, str):
done[k] = v.strip()
# save the results in the global dictionary
vars.update(done)
return vars
def get_makefile_filename():
"""Return the path of the Makefile."""
if _PYTHON_BUILD:
return os.path.join(_PROJECT_BASE, "Makefile")
if hasattr(sys, 'abiflags'):
config_dir_name = 'config-%s%s' % (_PY_VERSION_SHORT, sys.abiflags)
else:
config_dir_name = 'config'
return os.path.join(get_path('stdlib'), config_dir_name, 'Makefile')
def _init_posix(vars):
"""Initialize the module as appropriate for POSIX systems."""
# load the installed Makefile:
makefile = get_makefile_filename()
try:
_parse_makefile(makefile, vars)
except IOError as e:
msg = "invalid Python installation: unable to open %s" % makefile
if hasattr(e, "strerror"):
msg = msg + " (%s)" % e.strerror
raise IOError(msg)
# load the installed pyconfig.h:
config_h = get_config_h_filename()
try:
with open(config_h) as f:
parse_config_h(f, vars)
except IOError as e:
msg = "invalid Python installation: unable to open %s" % config_h
if hasattr(e, "strerror"):
msg = msg + " (%s)" % e.strerror
raise IOError(msg)
# On AIX, there are wrong paths to the linker scripts in the Makefile
# -- these paths are relative to the Python source, but when installed
# the scripts are in another directory.
if _PYTHON_BUILD:
vars['LDSHARED'] = vars['BLDSHARED']
def _init_non_posix(vars):
"""Initialize the module as appropriate for NT"""
# set basic install directories
vars['LIBDEST'] = get_path('stdlib')
vars['BINLIBDEST'] = get_path('platstdlib')
vars['INCLUDEPY'] = get_path('include')
vars['SO'] = '.pyd'
vars['EXE'] = '.exe'
vars['VERSION'] = _PY_VERSION_SHORT_NO_DOT
vars['BINDIR'] = os.path.dirname(_safe_realpath(sys.executable))
#
# public APIs
#
def parse_config_h(fp, vars=None):
"""Parse a config.h-style file.
A dictionary containing name/value pairs is returned. If an
optional dictionary is passed in as the second argument, it is
used instead of a new dictionary.
"""
if vars is None:
vars = {}
define_rx = re.compile("#define ([A-Z][A-Za-z0-9_]+) (.*)\n")
undef_rx = re.compile("/[*] #undef ([A-Z][A-Za-z0-9_]+) [*]/\n")
while True:
line = fp.readline()
if not line:
break
m = define_rx.match(line)
if m:
n, v = m.group(1, 2)
try:
v = int(v)
except ValueError:
pass
vars[n] = v
else:
m = undef_rx.match(line)
if m:
vars[m.group(1)] = 0
return vars
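# Illustrative example (hypothetical pyconfig.h lines):
#   '#define HAVE_UNISTD_H 1\n' -> vars['HAVE_UNISTD_H'] = 1
#   '/* #undef HAVE_FOO */\n'   -> vars['HAVE_FOO'] = 0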
def get_config_h_filename():
"""Return the path of pyconfig.h."""
if _PYTHON_BUILD:
if os.name == "nt":
inc_dir = os.path.join(_PROJECT_BASE, "PC")
else:
inc_dir = _PROJECT_BASE
else:
inc_dir = get_path('platinclude')
return os.path.join(inc_dir, 'pyconfig.h')
def get_scheme_names():
"""Return a tuple containing the schemes names."""
return tuple(sorted(_SCHEMES.sections()))
def get_path_names():
"""Return a tuple containing the paths names."""
# xxx see if we want a static list
return _SCHEMES.options('posix_prefix')
def get_paths(scheme=_get_default_scheme(), vars=None, expand=True):
"""Return a mapping containing an install scheme.
``scheme`` is the install scheme name. If not provided, it will
return the default scheme for the current platform.
"""
_ensure_cfg_read()
if expand:
return _expand_vars(scheme, vars)
else:
return dict(_SCHEMES.items(scheme))
def get_path(name, scheme=_get_default_scheme(), vars=None, expand=True):
"""Return a path corresponding to the scheme.
``scheme`` is the install scheme name.
"""
return get_paths(scheme, vars, expand)[name]
def get_config_vars(*args):
"""With no arguments, return a dictionary of all configuration
variables relevant for the current platform.
On Unix, this means every variable defined in Python's installed Makefile;
On Windows and Mac OS it's a much smaller set.
With arguments, return a list of values that result from looking up
each argument in the configuration variable dictionary.
"""
global _CONFIG_VARS
if _CONFIG_VARS is None:
_CONFIG_VARS = {}
# Normalized versions of prefix and exec_prefix are handy to have;
# in fact, these are the standard versions used most places in the
# distutils2 module.
_CONFIG_VARS['prefix'] = _PREFIX
_CONFIG_VARS['exec_prefix'] = _EXEC_PREFIX
_CONFIG_VARS['py_version'] = _PY_VERSION
_CONFIG_VARS['py_version_short'] = _PY_VERSION_SHORT
_CONFIG_VARS['py_version_nodot'] = _PY_VERSION[0] + _PY_VERSION[2]
_CONFIG_VARS['base'] = _PREFIX
_CONFIG_VARS['platbase'] = _EXEC_PREFIX
_CONFIG_VARS['projectbase'] = _PROJECT_BASE
try:
_CONFIG_VARS['abiflags'] = sys.abiflags
except AttributeError:
# sys.abiflags may not be defined on all platforms.
_CONFIG_VARS['abiflags'] = ''
if os.name in ('nt', 'os2'):
_init_non_posix(_CONFIG_VARS)
if os.name == 'posix':
_init_posix(_CONFIG_VARS)
# Setting 'userbase' is done below the call to the
# init function to enable using 'get_config_var' in
# the init-function.
if sys.version >= '2.6':
_CONFIG_VARS['userbase'] = _getuserbase()
if 'srcdir' not in _CONFIG_VARS:
_CONFIG_VARS['srcdir'] = _PROJECT_BASE
else:
_CONFIG_VARS['srcdir'] = _safe_realpath(_CONFIG_VARS['srcdir'])
# Convert srcdir into an absolute path if it appears necessary.
# Normally it is relative to the build directory. However, during
# testing, for example, we might be running a non-installed python
# from a different directory.
if _PYTHON_BUILD and os.name == "posix":
base = _PROJECT_BASE
try:
cwd = os.getcwd()
except OSError:
cwd = None
if (not os.path.isabs(_CONFIG_VARS['srcdir']) and
base != cwd):
# srcdir is relative and we are not in the same directory
# as the executable. Assume executable is in the build
# directory and make srcdir absolute.
srcdir = os.path.join(base, _CONFIG_VARS['srcdir'])
_CONFIG_VARS['srcdir'] = os.path.normpath(srcdir)
if sys.platform == 'darwin':
kernel_version = os.uname()[2] # Kernel version (8.4.3)
major_version = int(kernel_version.split('.')[0])
if major_version < 8:
# On Mac OS X before 10.4, check if -arch and -isysroot
# are in CFLAGS or LDFLAGS and remove them if they are.
# This is needed when building extensions on a 10.3 system
# using a universal build of python.
for key in ('LDFLAGS', 'BASECFLAGS',
# a number of derived variables. These need to be
# patched up as well.
'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'):
flags = _CONFIG_VARS[key]
flags = re.sub('-arch\s+\w+\s', ' ', flags)
flags = re.sub('-isysroot [^ \t]*', ' ', flags)
_CONFIG_VARS[key] = flags
else:
# Allow the user to override the architecture flags using
# an environment variable.
# NOTE: This name was introduced by Apple in OSX 10.5 and
# is used by several scripting languages distributed with
# that OS release.
if 'ARCHFLAGS' in os.environ:
arch = os.environ['ARCHFLAGS']
for key in ('LDFLAGS', 'BASECFLAGS',
# a number of derived variables. These need to be
# patched up as well.
'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'):
flags = _CONFIG_VARS[key]
flags = re.sub('-arch\s+\w+\s', ' ', flags)
flags = flags + ' ' + arch
_CONFIG_VARS[key] = flags
# If we're on OSX 10.5 or later and the user tries to
# compiles an extension using an SDK that is not present
# on the current machine it is better to not use an SDK
# than to fail.
#
# The major usecase for this is users using a Python.org
# binary installer on OSX 10.6: that installer uses
# the 10.4u SDK, but that SDK is not installed by default
# when you install Xcode.
#
CFLAGS = _CONFIG_VARS.get('CFLAGS', '')
m = re.search('-isysroot\s+(\S+)', CFLAGS)
if m is not None:
sdk = m.group(1)
if not os.path.exists(sdk):
for key in ('LDFLAGS', 'BASECFLAGS',
# a number of derived variables. These need to be
# patched up as well.
'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'):
flags = _CONFIG_VARS[key]
flags = re.sub('-isysroot\s+\S+(\s|$)', ' ', flags)
_CONFIG_VARS[key] = flags
if args:
vals = []
for name in args:
vals.append(_CONFIG_VARS.get(name))
return vals
else:
return _CONFIG_VARS
def get_config_var(name):
"""Return the value of a single variable using the dictionary returned by
'get_config_vars()'.
Equivalent to get_config_vars().get(name)
"""
return get_config_vars().get(name)
def get_platform():
"""Return a string that identifies the current platform.
This is used mainly to distinguish platform-specific build directories and
platform-specific built distributions. Typically includes the OS name
and version and the architecture (as supplied by 'os.uname()'),
although the exact information included depends on the OS; eg. for IRIX
the architecture isn't particularly important (IRIX only runs on SGI
hardware), but for Linux the kernel version isn't particularly
important.
Examples of returned values:
linux-i586
linux-alpha (?)
solaris-2.6-sun4u
irix-5.3
irix64-6.2
Windows will return one of:
win-amd64 (64bit Windows on AMD64 (aka x86_64, Intel64, EM64T, etc))
win-ia64 (64bit Windows on Itanium)
win32 (all others - specifically, sys.platform is returned)
For other non-POSIX platforms, currently just returns 'sys.platform'.
"""
if os.name == 'nt':
# sniff sys.version for architecture.
prefix = " bit ("
i = sys.version.find(prefix)
if i == -1:
return sys.platform
j = sys.version.find(")", i)
look = sys.version[i+len(prefix):j].lower()
if look == 'amd64':
return 'win-amd64'
if look == 'itanium':
return 'win-ia64'
return sys.platform
if os.name != "posix" or not hasattr(os, 'uname'):
# XXX what about the architecture? NT is Intel or Alpha,
# Mac OS is M68k or PPC, etc.
return sys.platform
# Try to distinguish various flavours of Unix
osname, host, release, version, machine = os.uname()
# Convert the OS name to lowercase, remove '/' characters
# (to accommodate BSD/OS), and translate spaces (for "Power Macintosh")
osname = osname.lower().replace('/', '')
machine = machine.replace(' ', '_')
machine = machine.replace('/', '-')
if osname[:5] == "linux":
# At least on Linux/Intel, 'machine' is the processor --
# i386, etc.
# XXX what about Alpha, SPARC, etc?
return "%s-%s" % (osname, machine)
elif osname[:5] == "sunos":
if release[0] >= "5": # SunOS 5 == Solaris 2
osname = "solaris"
release = "%d.%s" % (int(release[0]) - 3, release[2:])
# fall through to standard osname-release-machine representation
elif osname[:4] == "irix": # could be "irix64"!
return "%s-%s" % (osname, release)
elif osname[:3] == "aix":
return "%s-%s.%s" % (osname, version, release)
elif osname[:6] == "cygwin":
osname = "cygwin"
rel_re = re.compile(r'[\d.]+')
m = rel_re.match(release)
if m:
release = m.group()
elif osname[:6] == "darwin":
#
# For our purposes, we'll assume that the system version from
# distutils' perspective is what MACOSX_DEPLOYMENT_TARGET is set
# to. This makes the compatibility story a bit more sane because the
# machine is going to compile and link as if it were
# MACOSX_DEPLOYMENT_TARGET.
cfgvars = get_config_vars()
macver = cfgvars.get('MACOSX_DEPLOYMENT_TARGET')
if True:
# Always calculate the release of the running machine,
# needed to determine if we can build fat binaries or not.
macrelease = macver
# Get the system version. Reading this plist is a documented
# way to get the system version (see the documentation for
# the Gestalt Manager)
try:
f = open('/System/Library/CoreServices/SystemVersion.plist')
except IOError:
# We're on a plain darwin box, fall back to the default
# behaviour.
pass
else:
try:
m = re.search(r'<key>ProductUserVisibleVersion</key>\s*'
r'<string>(.*?)</string>', f.read())
finally:
f.close()
if m is not None:
macrelease = '.'.join(m.group(1).split('.')[:2])
# else: fall back to the default behaviour
if not macver:
macver = macrelease
if macver:
release = macver
osname = "macosx"
if ((macrelease + '.') >= '10.4.' and
'-arch' in get_config_vars().get('CFLAGS', '').strip()):
# The universal build will build fat binaries, but not on
# systems before 10.4
#
# Try to detect 4-way universal builds, those have machine-type
# 'universal' instead of 'fat'.
machine = 'fat'
cflags = get_config_vars().get('CFLAGS')
archs = re.findall('-arch\s+(\S+)', cflags)
archs = tuple(sorted(set(archs)))
if len(archs) == 1:
machine = archs[0]
elif archs == ('i386', 'ppc'):
machine = 'fat'
elif archs == ('i386', 'x86_64'):
machine = 'intel'
elif archs == ('i386', 'ppc', 'x86_64'):
machine = 'fat3'
elif archs == ('ppc64', 'x86_64'):
machine = 'fat64'
elif archs == ('i386', 'ppc', 'ppc64', 'x86_64'):
machine = 'universal'
else:
raise ValueError(
"Don't know machine value for archs=%r" % (archs,))
elif machine == 'i386':
# On OSX the machine type returned by uname is always the
# 32-bit variant, even if the executable architecture is
# the 64-bit variant
if sys.maxsize >= 2**32:
machine = 'x86_64'
elif machine in ('PowerPC', 'Power_Macintosh'):
# Pick a sane name for the PPC architecture.
# See 'i386' case
if sys.maxsize >= 2**32:
machine = 'ppc64'
else:
machine = 'ppc'
return "%s-%s-%s" % (osname, release, machine)
def get_python_version():
return _PY_VERSION_SHORT
def _print_dict(title, data):
for index, (key, value) in enumerate(sorted(data.items())):
if index == 0:
print('%s: ' % (title))
print('\t%s = "%s"' % (key, value))
def _main():
"""Display all information sysconfig detains."""
print('Platform: "%s"' % get_platform())
print('Python version: "%s"' % get_python_version())
print('Current installation scheme: "%s"' % _get_default_scheme())
print()
_print_dict('Paths', get_paths())
print()
_print_dict('Variables', get_config_vars())
if __name__ == '__main__':
_main()
|
gpl-2.0
|
mmakmo/python
|
crawling_scraping/chapter05/word_frequency.py
|
1
|
1475
|
import sys
import os
from glob import glob
from collections import Counter
import MeCab
def main():
"""
"""
input_dir = sys.argv[1]
tagger = MeCab.Tagger('')
tagger.parse('')
frequency = Counter()
count_processed = 0
for path in glob(os.path.join(input_dir, '*', 'wiki_*')):
print('Processing {0}...'.format(path), file=sys.stderr)
with open(path) as file:
for content in iter_docs(file):
tokens = get_tokens(tagger, content)
frequency.update(tokens)
count_processed += 1
if count_processed % 1000 == 0:
print('{0} documents were processed.'.format(count_processed), file=sys.stderr)
for token, count in frequency.most_common(30):
print(token, count)
def iter_docs(file):
"""
"""
for line in file:
if line.startswith('<doc '):
buffer = []
elif line.startswith('</doc>'):
content = ''.join(buffer)
yield content
else:
buffer.append(line)
def get_tokens(tagger, content):
"""
"""
tokens = []
node = tagger.parseToNode(content)
while node:
category, sub_category = node.feature.split(',')[:2]
if category == '名詞' and sub_category in ('固有名詞', '一般'):
tokens.append(node.surface)
node = node.next
return tokens
if __name__ == '__main__':
main()
|
mit
|
xsynergy510x/android_external_chromium_org
|
remoting/host/installer/build-installer-archive.py
|
163
|
9195
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Creates a zip archive for the Chrome Remote Desktop Host installer.
This script builds a zip file that contains all the files needed to build an
installer for Chrome Remote Desktop Host.
This zip archive is then used by the signing bots to:
(1) Sign the binaries
(2) Build the final installer
TODO(garykac) We should consider merging this with build-webapp.py.
"""
import os
import shutil
import subprocess
import sys
import zipfile
def cleanDir(dir):
"""Deletes and recreates the dir to make sure it is clean.
Args:
dir: The directory to clean.
"""
try:
shutil.rmtree(dir)
except OSError:
if os.path.exists(dir):
raise
else:
pass
os.makedirs(dir, 0775)
def buildDefDictionary(definitions):
"""Builds the definition dictionary from the VARIABLE=value array.
Args:
    definitions: Array of variable definitions: 'VARIABLE=value'.
Returns:
Dictionary with the definitions.
"""
defs = {}
for d in definitions:
(key, val) = d.split('=')
defs[key] = val
return defs
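# Illustrative example (not part of the original source):
#   buildDefDictionary(['VERSION=1.2.3', 'CHANNEL=beta'])
# returns {'VERSION': '1.2.3', 'CHANNEL': 'beta'}.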
def createZip(zip_path, directory):
"""Creates a zipfile at zip_path for the given directory.
Args:
zip_path: Path to zip file to create.
directory: Directory with contents to archive.
"""
zipfile_base = os.path.splitext(os.path.basename(zip_path))[0]
zip = zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED)
for (root, dirs, files) in os.walk(directory):
for f in files:
full_path = os.path.join(root, f)
rel_path = os.path.relpath(full_path, directory)
zip.write(full_path, os.path.join(zipfile_base, rel_path))
zip.close()
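# Illustrative example (not part of the original source):
#   createZip('/tmp/remoting-host.zip', '/tmp/archive-contents')
# produces a zip whose entry paths are all prefixed with 'remoting-host/'.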
def remapSrcFile(dst_root, src_roots, src_file):
"""Calculates destination file path and creates directory.
Any matching |src_roots| prefix is stripped from |src_file| before
appending to |dst_root|.
For example, given:
dst_root = '/output'
src_roots = ['host/installer/mac']
src_file = 'host/installer/mac/Scripts/keystone_install.sh'
The final calculated path is:
'/output/Scripts/keystone_install.sh'
The |src_file| must match one of the |src_roots| prefixes. If there are no
matches, then an error is reported.
If multiple |src_roots| match, then only the first match is applied. Because
of this, if you have roots that share a common prefix, the longest string
should be first in this array.
Args:
dst_root: Target directory where files are copied.
    src_roots: Array of path prefixes which will be stripped from |src_file|
(if they match) before appending it to the |dst_root|.
src_file: Source file to be copied.
Returns:
Full path to destination file in |dst_root|.
"""
  # Strip off the directory prefix.
found_root = False
for root in src_roots:
root = os.path.normpath(root)
src_file = os.path.normpath(src_file)
if os.path.commonprefix([root, src_file]) == root:
src_file = os.path.relpath(src_file, root)
found_root = True
break
if not found_root:
error('Unable to match prefix for %s' % src_file)
dst_file = os.path.join(dst_root, src_file)
# Make sure target directory exists.
dst_dir = os.path.dirname(dst_file)
if not os.path.exists(dst_dir):
os.makedirs(dst_dir, 0775)
return dst_file
def copyFileWithDefs(src_file, dst_file, defs):
"""Copies from src_file to dst_file, performing variable substitution.
Any @@VARIABLE@@ in the source is replaced with the value of VARIABLE
in the |defs| dictionary when written to the destination file.
Args:
src_file: Full or relative path to source file to copy.
dst_file: Relative path (and filename) where src_file should be copied.
defs: Dictionary of variable definitions.
"""
data = open(src_file, 'r').read()
for key, val in defs.iteritems():
try:
data = data.replace('@@' + key + '@@', val)
except TypeError:
print repr(key), repr(val)
open(dst_file, 'w').write(data)
shutil.copystat(src_file, dst_file)
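# Illustrative example (not part of the original source): with
# defs = {'VERSION': '1.2.3'}, a template line such as
#   <string>@@VERSION@@</string>
# is written to the destination file as
#   <string>1.2.3</string>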
def copyZipIntoArchive(out_dir, files_root, zip_file):
"""Expands the zip_file into the out_dir, preserving the directory structure.
Args:
out_dir: Target directory where unzipped files are copied.
    files_root: Path prefix which is stripped from zip_file before appending
it to the out_dir.
zip_file: Relative path (and filename) to the zip file.
"""
base_zip_name = os.path.basename(zip_file)
# We don't use the 'zipfile' module here because it doesn't restore all the
# file permissions correctly. We use the 'unzip' command manually.
  old_dir = os.getcwd()
os.chdir(os.path.dirname(zip_file))
subprocess.call(['unzip', '-qq', '-o', base_zip_name])
os.chdir(old_dir)
# Unzip into correct dir in out_dir.
out_zip_path = remapSrcFile(out_dir, files_root, zip_file)
out_zip_dir = os.path.dirname(out_zip_path)
(src_dir, ignore1) = os.path.splitext(zip_file)
(base_dir_name, ignore2) = os.path.splitext(base_zip_name)
shutil.copytree(src_dir, os.path.join(out_zip_dir, base_dir_name))
def buildHostArchive(temp_dir, zip_path, source_file_roots, source_files,
gen_files, gen_files_dst, defs):
"""Builds a zip archive with the files needed to build the installer.
Args:
temp_dir: Temporary dir used to build up the contents for the archive.
zip_path: Full path to the zip file to create.
source_file_roots: Array of path prefixes to strip off |files| when adding
to the archive.
source_files: The array of files to add to archive. The path structure is
      preserved (except for the |source_file_roots| prefix).
gen_files: Full path to binaries to add to archive.
gen_files_dst: Relative path of where to add binary files in archive.
      This array needs to parallel |gen_files|.
defs: Dictionary of variable definitions.
"""
cleanDir(temp_dir)
for f in source_files:
dst_file = remapSrcFile(temp_dir, source_file_roots, f)
base_file = os.path.basename(f)
(base, ext) = os.path.splitext(f)
if ext == '.zip':
copyZipIntoArchive(temp_dir, source_file_roots, f)
elif ext in ['.packproj', '.pkgproj', '.plist', '.props', '.sh', '.json']:
copyFileWithDefs(f, dst_file, defs)
else:
shutil.copy2(f, dst_file)
for bs, bd in zip(gen_files, gen_files_dst):
dst_file = os.path.join(temp_dir, bd)
if not os.path.exists(os.path.dirname(dst_file)):
os.makedirs(os.path.dirname(dst_file))
if os.path.isdir(bs):
shutil.copytree(bs, dst_file)
else:
shutil.copy2(bs, dst_file)
createZip(zip_path, temp_dir)
def error(msg):
sys.stderr.write('ERROR: %s\n' % msg)
sys.exit(1)
def usage():
"""Display basic usage information."""
print ('Usage: %s\n'
' <temp-dir> <zip-path>\n'
' --source-file-roots <list of roots to strip off source files...>\n'
' --source-files <list of source files...>\n'
' --generated-files <list of generated target files...>\n'
' --generated-files-dst <dst for each generated file...>\n'
' --defs <list of VARIABLE=value definitions...>'
) % sys.argv[0]
def main():
if len(sys.argv) < 2:
usage()
error('Too few arguments')
temp_dir = sys.argv[1]
zip_path = sys.argv[2]
arg_mode = ''
source_file_roots = []
source_files = []
generated_files = []
generated_files_dst = []
definitions = []
for arg in sys.argv[3:]:
if arg == '--source-file-roots':
arg_mode = 'src-roots'
elif arg == '--source-files':
arg_mode = 'files'
elif arg == '--generated-files':
arg_mode = 'gen-src'
elif arg == '--generated-files-dst':
arg_mode = 'gen-dst'
elif arg == '--defs':
arg_mode = 'defs'
elif arg_mode == 'src-roots':
source_file_roots.append(arg)
elif arg_mode == 'files':
source_files.append(arg)
elif arg_mode == 'gen-src':
generated_files.append(arg)
elif arg_mode == 'gen-dst':
generated_files_dst.append(arg)
elif arg_mode == 'defs':
definitions.append(arg)
else:
usage()
error('Expected --source-files')
# Make sure at least one file was specified.
if len(source_files) == 0 and len(generated_files) == 0:
error('At least one input file must be specified.')
# Sort roots to ensure the longest one is first. See comment in remapSrcFile
# for why this is necessary.
source_file_roots = map(os.path.normpath, source_file_roots)
source_file_roots.sort(key=len, reverse=True)
# Verify that the 2 generated_files arrays have the same number of elements.
if len(generated_files) != len(generated_files_dst):
error('len(--generated-files) != len(--generated-files-dst)')
defs = buildDefDictionary(definitions)
result = buildHostArchive(temp_dir, zip_path, source_file_roots,
source_files, generated_files, generated_files_dst,
defs)
return 0
if __name__ == '__main__':
sys.exit(main())
|
bsd-3-clause
|
abandons/jieba
|
test/extract_topic.py
|
65
|
1463
|
import sys
sys.path.append("../")
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn import decomposition
import jieba
import time
import glob
import sys
import os
import random
if len(sys.argv)<2:
print("usage: extract_topic.py directory [n_topic] [n_top_words]")
sys.exit(0)
n_topic = 10
n_top_words = 25
if len(sys.argv)>2:
n_topic = int(sys.argv[2])
if len(sys.argv)>3:
n_top_words = int(sys.argv[3])
count_vect = CountVectorizer()
docs = []
pattern = os.path.join(sys.argv[1],"*.txt")
print("read "+pattern)
for f_name in glob.glob(pattern):
with open(f_name) as f:
print("read file:", f_name)
for line in f: #one line as a document
words = " ".join(jieba.cut(line))
docs.append(words)
random.shuffle(docs)
print("read done.")
print("transform")
counts = count_vect.fit_transform(docs)
tfidf = TfidfTransformer().fit_transform(counts)
print(tfidf.shape)
t0 = time.time()
print("training...")
nmf = decomposition.NMF(n_components=n_topic).fit(tfidf)
print("done in %0.3fs." % (time.time() - t0))
# Invert the vectorizer vocabulary to be able to map feature indices back to words
feature_names = count_vect.get_feature_names()
for topic_idx, topic in enumerate(nmf.components_):
print("Topic #%d:" % topic_idx)
print(" ".join([feature_names[i]
for i in topic.argsort()[:-n_top_words - 1:-1]]))
print("")
|
mit
|
BenLand100/rat-pac
|
python/SCons/cpp.py
|
19
|
20097
|
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/cpp.py 4043 2009/02/23 09:06:45 scons"
__doc__ = """
SCons C Pre-Processor module
"""
# TODO(1.5): remove this import
# This module doesn't use anything from SCons by name, but we import SCons
# here to pull in zip() from the SCons.compat layer for early Pythons.
import SCons
import os
import re
import string
#
# First "subsystem" of regular expressions that we set up:
#
# Stuff to turn the C preprocessor directives in a file's contents into
# a list of tuples that we can process easily.
#
# A table of regular expressions that fetch the arguments from the rest of
# a C preprocessor line. Different directives have different arguments
# that we want to fetch, using the regular expressions to which the lists
# of preprocessor directives map.
cpp_lines_dict = {
# Fetch the rest of a #if/#elif/#ifdef/#ifndef as one argument,
# separated from the keyword by white space.
('if', 'elif', 'ifdef', 'ifndef',)
: '\s+(.+)',
# Fetch the rest of a #import/#include/#include_next line as one
# argument, with white space optional.
('import', 'include', 'include_next',)
: '\s*(.+)',
# We don't care what comes after a #else or #endif line.
('else', 'endif',) : '',
# Fetch three arguments from a #define line:
# 1) The #defined keyword.
# 2) The optional parentheses and arguments (if it's a function-like
# macro, '' if it's not).
# 3) The expansion value.
('define',) : '\s+([_A-Za-z][_A-Za-z0-9_]+)(\([^)]*\))?\s*(.*)',
# Fetch the #undefed keyword from a #undef line.
('undef',) : '\s+([_A-Za-z][A-Za-z0-9_]+)',
}
# Create a table that maps each individual C preprocessor directive to
# the corresponding compiled regular expression that fetches the arguments
# we care about.
Table = {}
for op_list, expr in cpp_lines_dict.items():
e = re.compile(expr)
for op in op_list:
Table[op] = e
del e
del op
del op_list
# Create a list of the expressions we'll use to match all of the
# preprocessor directives. These are the same as the directives
# themselves *except* that we must use a negative lookahead assertion
# when matching "if" so it doesn't match the "if" in "ifdef."
override = {
'if' : 'if(?!def)',
}
l = map(lambda x, o=override: o.get(x, x), Table.keys())
# Turn the list of expressions into one big honkin' regular expression
# that will match all the preprocessor lines at once. This will return
# a list of tuples, one for each preprocessor line. The preprocessor
# directive will be the first element in each tuple, and the rest of
# the line will be the second element.
e = '^\s*#\s*(' + string.join(l, '|') + ')(.*)$'
# And last but not least, compile the expression.
CPP_Expression = re.compile(e, re.M)
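# Illustrative example (not part of the original source): running
# CPP_Expression.findall() over the two lines
#   #include <stdio.h>
#   #define FOO 1
# yields [('include', ' <stdio.h>'), ('define', ' FOO 1')].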
#
# Second "subsystem" of regular expressions that we set up:
#
# Stuff to translate a C preprocessor expression (as found on a #if or
# #elif line) into an equivalent Python expression that we can eval().
#
# A dictionary that maps the C representation of Boolean operators
# to their Python equivalents.
CPP_to_Python_Ops_Dict = {
'!' : ' not ',
'!=' : ' != ',
'&&' : ' and ',
'||' : ' or ',
'?' : ' and ',
':' : ' or ',
'\r' : '',
}
CPP_to_Python_Ops_Sub = lambda m, d=CPP_to_Python_Ops_Dict: d[m.group(0)]
# We have to sort the keys by length so that longer expressions
# come *before* shorter expressions--in particular, "!=" must
# come before "!" in the alternation. Without this, the Python
# re module, as late as version 2.2.2, empirically matches the
# "!" in "!=" first, instead of finding the longest match.
# What's up with that?
l = CPP_to_Python_Ops_Dict.keys()
l.sort(lambda a, b: cmp(len(b), len(a)))
# Turn the list of keys into one regular expression that will allow us
# to substitute all of the operators at once.
expr = string.join(map(re.escape, l), '|')
# ...and compile the expression.
CPP_to_Python_Ops_Expression = re.compile(expr)
# A separate list of expressions to be evaluated and substituted
# sequentially, not all at once.
CPP_to_Python_Eval_List = [
['defined\s+(\w+)', '__dict__.has_key("\\1")'],
['defined\s*\((\w+)\)', '__dict__.has_key("\\1")'],
['/\*.*\*/', ''],
['/\*.*', ''],
['//.*', ''],
['(0x[0-9A-Fa-f]*)[UL]+', '\\1L'],
]
# Replace the string representations of the regular expressions in the
# list with compiled versions.
for l in CPP_to_Python_Eval_List:
l[0] = re.compile(l[0])
# Wrap up all of the above into a handy function.
def CPP_to_Python(s):
"""
Converts a C pre-processor expression into an equivalent
Python expression that can be evaluated.
"""
s = CPP_to_Python_Ops_Expression.sub(CPP_to_Python_Ops_Sub, s)
for expr, repl in CPP_to_Python_Eval_List:
s = expr.sub(repl, s)
return s
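# Illustrative example (not part of the original source): the C expression
#   defined(FOO) && BAR != 1
# is rewritten by CPP_to_Python() to roughly
#   __dict__.has_key("FOO")  and  BAR  !=  1
# which can then be eval()'d against the preprocessor namespace.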
del expr
del l
del override
class FunctionEvaluator:
"""
Handles delayed evaluation of a #define function call.
"""
def __init__(self, name, args, expansion):
"""
Squirrels away the arguments and expansion value of a #define
macro function for later evaluation when we must actually expand
a value that uses it.
"""
self.name = name
self.args = function_arg_separator.split(args)
try:
expansion = string.split(expansion, '##')
except (AttributeError, TypeError):
# Python 1.5 throws TypeError if "expansion" isn't a string,
# later versions throw AttributeError.
pass
self.expansion = expansion
def __call__(self, *values):
"""
Evaluates the expansion of a #define macro function called
with the specified values.
"""
if len(self.args) != len(values):
raise ValueError, "Incorrect number of arguments to `%s'" % self.name
# Create a dictionary that maps the macro arguments to the
# corresponding values in this "call." We'll use this when we
# eval() the expansion so that arguments will get expanded to
# the right values.
locals = {}
for k, v in zip(self.args, values):
locals[k] = v
parts = []
for s in self.expansion:
if not s in self.args:
s = repr(s)
parts.append(s)
statement = string.join(parts, ' + ')
return eval(statement, globals(), locals)
# Find line continuations.
line_continuations = re.compile('\\\\\r?\n')
# Search for a "function call" macro on an expansion. Returns the
# two-tuple of the "function" name itself, and a string containing the
# arguments within the call parentheses.
function_name = re.compile('(\S+)\(([^)]*)\)')
# Split a string containing comma-separated function call arguments into
# the separate arguments.
function_arg_separator = re.compile(',\s*')
class PreProcessor:
"""
The main workhorse class for handling C pre-processing.
"""
def __init__(self, current=os.curdir, cpppath=(), dict={}, all=0):
global Table
cpppath = tuple(cpppath)
self.searchpath = {
'"' : (current,) + cpppath,
'<' : cpppath + (current,),
}
# Initialize our C preprocessor namespace for tracking the
# values of #defined keywords. We use this namespace to look
# for keywords on #ifdef/#ifndef lines, and to eval() the
# expressions on #if/#elif lines (after massaging them from C to
# Python).
self.cpp_namespace = dict.copy()
self.cpp_namespace['__dict__'] = self.cpp_namespace
if all:
self.do_include = self.all_include
# For efficiency, a dispatch table maps each C preprocessor
# directive (#if, #define, etc.) to the method that should be
        # called when we see it.  We accommodate state changes (#if,
# #ifdef, #ifndef) by pushing the current dispatch table on a
# stack and changing what method gets called for each relevant
# directive we might see next at this level (#else, #elif).
# #endif will simply pop the stack.
d = {
'scons_current_file' : self.scons_current_file
}
for op in Table.keys():
d[op] = getattr(self, 'do_' + op)
self.default_table = d
# Controlling methods.
def tupleize(self, contents):
"""
Turns the contents of a file into a list of easily-processed
tuples describing the CPP lines in the file.
The first element of each tuple is the line's preprocessor
directive (#if, #include, #define, etc., minus the initial '#').
The remaining elements are specific to the type of directive, as
pulled apart by the regular expression.
"""
global CPP_Expression, Table
contents = line_continuations.sub('', contents)
cpp_tuples = CPP_Expression.findall(contents)
return map(lambda m, t=Table:
(m[0],) + t[m[0]].match(m[1]).groups(),
cpp_tuples)
def __call__(self, file):
"""
Pre-processes a file.
This is the main public entry point.
"""
self.current_file = file
return self.process_contents(self.read_file(file), file)
def process_contents(self, contents, fname=None):
"""
Pre-processes a file contents.
This is the main internal entry point.
"""
self.stack = []
self.dispatch_table = self.default_table.copy()
self.current_file = fname
self.tuples = self.tupleize(contents)
self.initialize_result(fname)
while self.tuples:
t = self.tuples.pop(0)
# Uncomment to see the list of tuples being processed (e.g.,
# to validate the CPP lines are being translated correctly).
#print t
self.dispatch_table[t[0]](t)
return self.finalize_result(fname)
# Dispatch table stack manipulation methods.
def save(self):
"""
Pushes the current dispatch table on the stack and re-initializes
the current dispatch table to the default.
"""
self.stack.append(self.dispatch_table)
self.dispatch_table = self.default_table.copy()
def restore(self):
"""
Pops the previous dispatch table off the stack and makes it the
current one.
"""
try: self.dispatch_table = self.stack.pop()
except IndexError: pass
# Utility methods.
def do_nothing(self, t):
"""
Null method for when we explicitly want the action for a
specific preprocessor directive to do nothing.
"""
pass
def scons_current_file(self, t):
self.current_file = t[1]
def eval_expression(self, t):
"""
Evaluates a C preprocessor expression.
This is done by converting it to a Python equivalent and
eval()ing it in the C preprocessor namespace we use to
track #define values.
"""
t = CPP_to_Python(string.join(t[1:]))
try: return eval(t, self.cpp_namespace)
except (NameError, TypeError): return 0
def initialize_result(self, fname):
self.result = [fname]
def finalize_result(self, fname):
return self.result[1:]
def find_include_file(self, t):
"""
Finds the #include file for a given preprocessor tuple.
"""
fname = t[2]
for d in self.searchpath[t[1]]:
if d == os.curdir:
f = fname
else:
f = os.path.join(d, fname)
if os.path.isfile(f):
return f
return None
def read_file(self, file):
return open(file).read()
# Start and stop processing include lines.
def start_handling_includes(self, t=None):
"""
Causes the PreProcessor object to start processing #import,
#include and #include_next lines.
This method will be called when a #if, #ifdef, #ifndef or #elif
evaluates True, or when we reach the #else in a #if, #ifdef,
#ifndef or #elif block where a condition already evaluated
False.
"""
d = self.dispatch_table
d['import'] = self.do_import
d['include'] = self.do_include
d['include_next'] = self.do_include
def stop_handling_includes(self, t=None):
"""
Causes the PreProcessor object to stop processing #import,
#include and #include_next lines.
This method will be called when a #if, #ifdef, #ifndef or #elif
evaluates False, or when we reach the #else in a #if, #ifdef,
#ifndef or #elif block where a condition already evaluated True.
"""
d = self.dispatch_table
d['import'] = self.do_nothing
d['include'] = self.do_nothing
d['include_next'] = self.do_nothing
# Default methods for handling all of the preprocessor directives.
# (Note that what actually gets called for a given directive at any
# point in time is really controlled by the dispatch_table.)
def _do_if_else_condition(self, condition):
"""
Common logic for evaluating the conditions on #if, #ifdef and
#ifndef lines.
"""
self.save()
d = self.dispatch_table
if condition:
self.start_handling_includes()
d['elif'] = self.stop_handling_includes
d['else'] = self.stop_handling_includes
else:
self.stop_handling_includes()
d['elif'] = self.do_elif
d['else'] = self.start_handling_includes
def do_ifdef(self, t):
"""
Default handling of a #ifdef line.
"""
self._do_if_else_condition(self.cpp_namespace.has_key(t[1]))
def do_ifndef(self, t):
"""
Default handling of a #ifndef line.
"""
self._do_if_else_condition(not self.cpp_namespace.has_key(t[1]))
def do_if(self, t):
"""
Default handling of a #if line.
"""
self._do_if_else_condition(self.eval_expression(t))
def do_elif(self, t):
"""
Default handling of a #elif line.
"""
d = self.dispatch_table
if self.eval_expression(t):
self.start_handling_includes()
d['elif'] = self.stop_handling_includes
d['else'] = self.stop_handling_includes
def do_else(self, t):
"""
Default handling of a #else line.
"""
pass
def do_endif(self, t):
"""
Default handling of a #endif line.
"""
self.restore()
def do_define(self, t):
"""
Default handling of a #define line.
"""
_, name, args, expansion = t
try:
expansion = int(expansion)
except (TypeError, ValueError):
pass
if args:
evaluator = FunctionEvaluator(name, args[1:-1], expansion)
self.cpp_namespace[name] = evaluator
else:
self.cpp_namespace[name] = expansion
def do_undef(self, t):
"""
Default handling of a #undef line.
"""
try: del self.cpp_namespace[t[1]]
except KeyError: pass
def do_import(self, t):
"""
Default handling of a #import line.
"""
# XXX finish this -- maybe borrow/share logic from do_include()...?
pass
def do_include(self, t):
"""
Default handling of a #include line.
"""
t = self.resolve_include(t)
include_file = self.find_include_file(t)
if include_file:
#print "include_file =", include_file
self.result.append(include_file)
contents = self.read_file(include_file)
new_tuples = [('scons_current_file', include_file)] + \
self.tupleize(contents) + \
[('scons_current_file', self.current_file)]
self.tuples[:] = new_tuples + self.tuples
# Date: Tue, 22 Nov 2005 20:26:09 -0500
# From: Stefan Seefeld <[email protected]>
#
# By the way, #include_next is not the same as #include. The difference
# being that #include_next starts its search in the path following the
# path that let to the including file. In other words, if your system
# include paths are ['/foo', '/bar'], and you are looking at a header
# '/foo/baz.h', it might issue an '#include_next <baz.h>' which would
# correctly resolve to '/bar/baz.h' (if that exists), but *not* see
# '/foo/baz.h' again. See http://www.delorie.com/gnu/docs/gcc/cpp_11.html
# for more reasoning.
#
# I have no idea in what context 'import' might be used.
# XXX is #include_next really the same as #include ?
do_include_next = do_include
# Utility methods for handling resolution of include files.
def resolve_include(self, t):
"""Resolve a tuple-ized #include line.
This handles recursive expansion of values without "" or <>
surrounding the name until an initial " or < is found, to handle
#include FILE
where FILE is a #define somewhere else.
"""
s = t[1]
while not s[0] in '<"':
#print "s =", s
try:
s = self.cpp_namespace[s]
except KeyError:
m = function_name.search(s)
s = self.cpp_namespace[m.group(1)]
if callable(s):
args = function_arg_separator.split(m.group(2))
s = apply(s, args)
if not s:
return None
return (t[0], s[0], s[1:-1])
def all_include(self, t):
"""
"""
self.result.append(self.resolve_include(t))
class DumbPreProcessor(PreProcessor):
"""A preprocessor that ignores all #if/#elif/#else/#endif directives
and just reports back *all* of the #include files (like the classic
SCons scanner did).
This is functionally equivalent to using a regular expression to
find all of the #include lines, only slower. It exists mainly as
an example of how the main PreProcessor class can be sub-classed
to tailor its behavior.
"""
def __init__(self, *args, **kw):
apply(PreProcessor.__init__, (self,)+args, kw)
d = self.default_table
for func in ['if', 'elif', 'else', 'endif', 'ifdef', 'ifndef']:
            d[func] = self.do_nothing
del __revision__
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
bsd-3-clause
|
alfanugraha/LUMENS-repo
|
processing/gdal/pct2rgb.py
|
4
|
2876
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
pct2rgb.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from PyQt4 import QtGui
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.tools.system import *
from processing.parameters.ParameterRaster import ParameterRaster
from processing.parameters.ParameterSelection import ParameterSelection
from processing.outputs.OutputRaster import OutputRaster
from processing.gdal.GdalUtils import GdalUtils
class pct2rgb(GeoAlgorithm):
INPUT = 'INPUT'
OUTPUT = 'OUTPUT'
NBAND = 'NBAND'
def getIcon(self):
filepath = os.path.dirname(__file__) + '/icons/8-to-24-bits.png'
return QtGui.QIcon(filepath)
def defineCharacteristics(self):
self.name = 'PCT to RGB'
self.group = '[GDAL] Conversion'
self.addParameter(ParameterRaster(pct2rgb.INPUT, 'Input layer', False))
options = []
for i in range(25):
options.append(str(i + 1))
self.addParameter(ParameterSelection(pct2rgb.NBAND, 'Band to convert',
options))
self.addOutput(OutputRaster(pct2rgb.OUTPUT, 'Output layer'))
def processAlgorithm(self, progress):
arguments = []
arguments.append('-b')
arguments.append(str(self.getParameterValue(pct2rgb.NBAND) + 1))
arguments.append('-of')
out = self.getOutputValue(pct2rgb.OUTPUT)
arguments.append(GdalUtils.getFormatShortNameFromFilename(out))
arguments.append(self.getParameterValue(pct2rgb.INPUT))
arguments.append(out)
commands = []
if isWindows():
commands = ['cmd.exe', '/C ', 'pct2rgb.bat',
GdalUtils.escapeAndJoin(arguments)]
else:
commands = ['pct2rgb.py', GdalUtils.escapeAndJoin(arguments)]
GdalUtils.runGdal(commands, progress)
|
gpl-2.0
|
zearom32/SmartBooks
|
books/migrations/0005_auto_20150413_2112.py
|
1
|
1457
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('books', '0004_goodsinfo'),
]
operations = [
migrations.AddField(
model_name='bookinfo',
name='author',
field=models.CharField(default='', max_length=100),
preserve_default=False,
),
migrations.AddField(
model_name='bookinfo',
name='pages',
field=models.IntegerField(default=0),
preserve_default=False,
),
migrations.AddField(
model_name='bookinfo',
name='price',
field=models.DecimalField(default=0, max_digits=10, decimal_places=2),
preserve_default=False,
),
migrations.AddField(
model_name='bookinfo',
name='publidate',
field=models.CharField(default='', max_length=20),
preserve_default=False,
),
migrations.AddField(
model_name='bookinfo',
name='publisher',
field=models.CharField(default='', max_length=100),
preserve_default=False,
),
migrations.AddField(
model_name='bookinfo',
name='title',
field=models.CharField(default='', max_length=100),
preserve_default=False,
),
]
|
mit
|
flgiordano/netcash
|
+/google-cloud-sdk/lib/googlecloudsdk/core/document_renderers/renderer.py
|
1
|
2678
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cloud SDK markdown document renderer base class."""
import abc
from googlecloudsdk.core import log
# Font Attributes.
BOLD, ITALIC, CODE = range(3)
class Renderer(object):
"""Markdown renderer base class.
The member functions provide an abstract document model that matches markdown
entities to output document renderings.
Attributes:
_font: The font attribute bitmask.
_out: The output stream.
    _title: The document title.
_width: The output width in characters.
"""
__metaclass__ = abc.ABCMeta
def __init__(self, out=None, title=None, width=80):
self._font = 0
self._out = out or log.out
self._title = title
self._width = width
def Entities(self, buf):
"""Converts special characters to their entity tags.
This is applied after font embellishments.
Args:
buf: The normal text that may contain special characters.
Returns:
The escaped string.
"""
return buf
def Escape(self, buf):
"""Escapes special characters in normal text.
This is applied before font embellishments.
Args:
buf: The normal text that may contain special characters.
Returns:
The escaped string.
"""
return buf
def Finish(self):
"""Finishes all output document rendering."""
pass
def Font(self, unused_attr, unused_out=None):
"""Returns the font embellishment string for attr.
Args:
unused_attr: None to reset to the default font, otherwise one of BOLD,
ITALIC, or CODE.
unused_out: Writes tags line to this stream if not None.
Returns:
The font embellishment string.
"""
return ''
def Link(self, target, text):
"""Renders an anchor.
Args:
target: The link target URL.
text: The text to be displayed instead of the link.
Returns:
The rendered link anchor and text.
"""
if text:
if target and '://' in target:
# Show non-local targets.
return '{0} ({1})'.format(text, target)
return text
if target:
return target
return '[]()'
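  # Illustrative examples (not part of the original source):
  #   Link('http://example.com', 'Example') -> 'Example (http://example.com)'
  #   Link('gcloud-overview', 'overview')   -> 'overview'   (local target)
  #   Link('http://example.com', None)      -> 'http://example.com'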
|
bsd-3-clause
|
amith01994/intellij-community
|
python/lib/Lib/encodings/cp500.py
|
593
|
13377
|
""" Python Character Mapping Codec cp500 generated from 'MAPPINGS/VENDORS/MICSFT/EBCDIC/CP500.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp500',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x9c' # 0x04 -> CONTROL
u'\t' # 0x05 -> HORIZONTAL TABULATION
u'\x86' # 0x06 -> CONTROL
u'\x7f' # 0x07 -> DELETE
u'\x97' # 0x08 -> CONTROL
u'\x8d' # 0x09 -> CONTROL
u'\x8e' # 0x0A -> CONTROL
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x9d' # 0x14 -> CONTROL
u'\x85' # 0x15 -> CONTROL
u'\x08' # 0x16 -> BACKSPACE
u'\x87' # 0x17 -> CONTROL
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x92' # 0x1A -> CONTROL
u'\x8f' # 0x1B -> CONTROL
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u'\x80' # 0x20 -> CONTROL
u'\x81' # 0x21 -> CONTROL
u'\x82' # 0x22 -> CONTROL
u'\x83' # 0x23 -> CONTROL
u'\x84' # 0x24 -> CONTROL
u'\n' # 0x25 -> LINE FEED
u'\x17' # 0x26 -> END OF TRANSMISSION BLOCK
u'\x1b' # 0x27 -> ESCAPE
u'\x88' # 0x28 -> CONTROL
u'\x89' # 0x29 -> CONTROL
u'\x8a' # 0x2A -> CONTROL
u'\x8b' # 0x2B -> CONTROL
u'\x8c' # 0x2C -> CONTROL
u'\x05' # 0x2D -> ENQUIRY
u'\x06' # 0x2E -> ACKNOWLEDGE
u'\x07' # 0x2F -> BELL
u'\x90' # 0x30 -> CONTROL
u'\x91' # 0x31 -> CONTROL
u'\x16' # 0x32 -> SYNCHRONOUS IDLE
u'\x93' # 0x33 -> CONTROL
u'\x94' # 0x34 -> CONTROL
u'\x95' # 0x35 -> CONTROL
u'\x96' # 0x36 -> CONTROL
u'\x04' # 0x37 -> END OF TRANSMISSION
u'\x98' # 0x38 -> CONTROL
u'\x99' # 0x39 -> CONTROL
u'\x9a' # 0x3A -> CONTROL
u'\x9b' # 0x3B -> CONTROL
u'\x14' # 0x3C -> DEVICE CONTROL FOUR
u'\x15' # 0x3D -> NEGATIVE ACKNOWLEDGE
u'\x9e' # 0x3E -> CONTROL
u'\x1a' # 0x3F -> SUBSTITUTE
u' ' # 0x40 -> SPACE
u'\xa0' # 0x41 -> NO-BREAK SPACE
u'\xe2' # 0x42 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe4' # 0x43 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe0' # 0x44 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe1' # 0x45 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe3' # 0x46 -> LATIN SMALL LETTER A WITH TILDE
u'\xe5' # 0x47 -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe7' # 0x48 -> LATIN SMALL LETTER C WITH CEDILLA
u'\xf1' # 0x49 -> LATIN SMALL LETTER N WITH TILDE
u'[' # 0x4A -> LEFT SQUARE BRACKET
u'.' # 0x4B -> FULL STOP
u'<' # 0x4C -> LESS-THAN SIGN
u'(' # 0x4D -> LEFT PARENTHESIS
u'+' # 0x4E -> PLUS SIGN
u'!' # 0x4F -> EXCLAMATION MARK
u'&' # 0x50 -> AMPERSAND
u'\xe9' # 0x51 -> LATIN SMALL LETTER E WITH ACUTE
u'\xea' # 0x52 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x53 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xe8' # 0x54 -> LATIN SMALL LETTER E WITH GRAVE
u'\xed' # 0x55 -> LATIN SMALL LETTER I WITH ACUTE
u'\xee' # 0x56 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0x57 -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xec' # 0x58 -> LATIN SMALL LETTER I WITH GRAVE
u'\xdf' # 0x59 -> LATIN SMALL LETTER SHARP S (GERMAN)
u']' # 0x5A -> RIGHT SQUARE BRACKET
u'$' # 0x5B -> DOLLAR SIGN
u'*' # 0x5C -> ASTERISK
u')' # 0x5D -> RIGHT PARENTHESIS
u';' # 0x5E -> SEMICOLON
u'^' # 0x5F -> CIRCUMFLEX ACCENT
u'-' # 0x60 -> HYPHEN-MINUS
u'/' # 0x61 -> SOLIDUS
u'\xc2' # 0x62 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\xc4' # 0x63 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc0' # 0x64 -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xc1' # 0x65 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xc3' # 0x66 -> LATIN CAPITAL LETTER A WITH TILDE
u'\xc5' # 0x67 -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc7' # 0x68 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xd1' # 0x69 -> LATIN CAPITAL LETTER N WITH TILDE
u'\xa6' # 0x6A -> BROKEN BAR
u',' # 0x6B -> COMMA
u'%' # 0x6C -> PERCENT SIGN
u'_' # 0x6D -> LOW LINE
u'>' # 0x6E -> GREATER-THAN SIGN
u'?' # 0x6F -> QUESTION MARK
u'\xf8' # 0x70 -> LATIN SMALL LETTER O WITH STROKE
u'\xc9' # 0x71 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xca' # 0x72 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xcb' # 0x73 -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\xc8' # 0x74 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xcd' # 0x75 -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0x76 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0x77 -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\xcc' # 0x78 -> LATIN CAPITAL LETTER I WITH GRAVE
u'`' # 0x79 -> GRAVE ACCENT
u':' # 0x7A -> COLON
u'#' # 0x7B -> NUMBER SIGN
u'@' # 0x7C -> COMMERCIAL AT
u"'" # 0x7D -> APOSTROPHE
u'=' # 0x7E -> EQUALS SIGN
u'"' # 0x7F -> QUOTATION MARK
u'\xd8' # 0x80 -> LATIN CAPITAL LETTER O WITH STROKE
u'a' # 0x81 -> LATIN SMALL LETTER A
u'b' # 0x82 -> LATIN SMALL LETTER B
u'c' # 0x83 -> LATIN SMALL LETTER C
u'd' # 0x84 -> LATIN SMALL LETTER D
u'e' # 0x85 -> LATIN SMALL LETTER E
u'f' # 0x86 -> LATIN SMALL LETTER F
u'g' # 0x87 -> LATIN SMALL LETTER G
u'h' # 0x88 -> LATIN SMALL LETTER H
u'i' # 0x89 -> LATIN SMALL LETTER I
u'\xab' # 0x8A -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0x8B -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xf0' # 0x8C -> LATIN SMALL LETTER ETH (ICELANDIC)
u'\xfd' # 0x8D -> LATIN SMALL LETTER Y WITH ACUTE
u'\xfe' # 0x8E -> LATIN SMALL LETTER THORN (ICELANDIC)
u'\xb1' # 0x8F -> PLUS-MINUS SIGN
u'\xb0' # 0x90 -> DEGREE SIGN
u'j' # 0x91 -> LATIN SMALL LETTER J
u'k' # 0x92 -> LATIN SMALL LETTER K
u'l' # 0x93 -> LATIN SMALL LETTER L
u'm' # 0x94 -> LATIN SMALL LETTER M
u'n' # 0x95 -> LATIN SMALL LETTER N
u'o' # 0x96 -> LATIN SMALL LETTER O
u'p' # 0x97 -> LATIN SMALL LETTER P
u'q' # 0x98 -> LATIN SMALL LETTER Q
u'r' # 0x99 -> LATIN SMALL LETTER R
u'\xaa' # 0x9A -> FEMININE ORDINAL INDICATOR
u'\xba' # 0x9B -> MASCULINE ORDINAL INDICATOR
u'\xe6' # 0x9C -> LATIN SMALL LIGATURE AE
u'\xb8' # 0x9D -> CEDILLA
u'\xc6' # 0x9E -> LATIN CAPITAL LIGATURE AE
u'\xa4' # 0x9F -> CURRENCY SIGN
u'\xb5' # 0xA0 -> MICRO SIGN
u'~' # 0xA1 -> TILDE
u's' # 0xA2 -> LATIN SMALL LETTER S
u't' # 0xA3 -> LATIN SMALL LETTER T
u'u' # 0xA4 -> LATIN SMALL LETTER U
u'v' # 0xA5 -> LATIN SMALL LETTER V
u'w' # 0xA6 -> LATIN SMALL LETTER W
u'x' # 0xA7 -> LATIN SMALL LETTER X
u'y' # 0xA8 -> LATIN SMALL LETTER Y
u'z' # 0xA9 -> LATIN SMALL LETTER Z
u'\xa1' # 0xAA -> INVERTED EXCLAMATION MARK
u'\xbf' # 0xAB -> INVERTED QUESTION MARK
u'\xd0' # 0xAC -> LATIN CAPITAL LETTER ETH (ICELANDIC)
u'\xdd' # 0xAD -> LATIN CAPITAL LETTER Y WITH ACUTE
u'\xde' # 0xAE -> LATIN CAPITAL LETTER THORN (ICELANDIC)
u'\xae' # 0xAF -> REGISTERED SIGN
u'\xa2' # 0xB0 -> CENT SIGN
u'\xa3' # 0xB1 -> POUND SIGN
u'\xa5' # 0xB2 -> YEN SIGN
u'\xb7' # 0xB3 -> MIDDLE DOT
u'\xa9' # 0xB4 -> COPYRIGHT SIGN
u'\xa7' # 0xB5 -> SECTION SIGN
u'\xb6' # 0xB6 -> PILCROW SIGN
u'\xbc' # 0xB7 -> VULGAR FRACTION ONE QUARTER
u'\xbd' # 0xB8 -> VULGAR FRACTION ONE HALF
u'\xbe' # 0xB9 -> VULGAR FRACTION THREE QUARTERS
u'\xac' # 0xBA -> NOT SIGN
u'|' # 0xBB -> VERTICAL LINE
u'\xaf' # 0xBC -> MACRON
u'\xa8' # 0xBD -> DIAERESIS
u'\xb4' # 0xBE -> ACUTE ACCENT
u'\xd7' # 0xBF -> MULTIPLICATION SIGN
u'{' # 0xC0 -> LEFT CURLY BRACKET
u'A' # 0xC1 -> LATIN CAPITAL LETTER A
u'B' # 0xC2 -> LATIN CAPITAL LETTER B
u'C' # 0xC3 -> LATIN CAPITAL LETTER C
u'D' # 0xC4 -> LATIN CAPITAL LETTER D
u'E' # 0xC5 -> LATIN CAPITAL LETTER E
u'F' # 0xC6 -> LATIN CAPITAL LETTER F
u'G' # 0xC7 -> LATIN CAPITAL LETTER G
u'H' # 0xC8 -> LATIN CAPITAL LETTER H
u'I' # 0xC9 -> LATIN CAPITAL LETTER I
u'\xad' # 0xCA -> SOFT HYPHEN
u'\xf4' # 0xCB -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf6' # 0xCC -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf2' # 0xCD -> LATIN SMALL LETTER O WITH GRAVE
u'\xf3' # 0xCE -> LATIN SMALL LETTER O WITH ACUTE
u'\xf5' # 0xCF -> LATIN SMALL LETTER O WITH TILDE
u'}' # 0xD0 -> RIGHT CURLY BRACKET
u'J' # 0xD1 -> LATIN CAPITAL LETTER J
u'K' # 0xD2 -> LATIN CAPITAL LETTER K
u'L' # 0xD3 -> LATIN CAPITAL LETTER L
u'M' # 0xD4 -> LATIN CAPITAL LETTER M
u'N' # 0xD5 -> LATIN CAPITAL LETTER N
u'O' # 0xD6 -> LATIN CAPITAL LETTER O
u'P' # 0xD7 -> LATIN CAPITAL LETTER P
u'Q' # 0xD8 -> LATIN CAPITAL LETTER Q
u'R' # 0xD9 -> LATIN CAPITAL LETTER R
u'\xb9' # 0xDA -> SUPERSCRIPT ONE
u'\xfb' # 0xDB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0xDC -> LATIN SMALL LETTER U WITH DIAERESIS
u'\xf9' # 0xDD -> LATIN SMALL LETTER U WITH GRAVE
u'\xfa' # 0xDE -> LATIN SMALL LETTER U WITH ACUTE
u'\xff' # 0xDF -> LATIN SMALL LETTER Y WITH DIAERESIS
u'\\' # 0xE0 -> REVERSE SOLIDUS
u'\xf7' # 0xE1 -> DIVISION SIGN
u'S' # 0xE2 -> LATIN CAPITAL LETTER S
u'T' # 0xE3 -> LATIN CAPITAL LETTER T
u'U' # 0xE4 -> LATIN CAPITAL LETTER U
u'V' # 0xE5 -> LATIN CAPITAL LETTER V
u'W' # 0xE6 -> LATIN CAPITAL LETTER W
u'X' # 0xE7 -> LATIN CAPITAL LETTER X
u'Y' # 0xE8 -> LATIN CAPITAL LETTER Y
u'Z' # 0xE9 -> LATIN CAPITAL LETTER Z
u'\xb2' # 0xEA -> SUPERSCRIPT TWO
u'\xd4' # 0xEB -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\xd6' # 0xEC -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xd2' # 0xED -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd5' # 0xEF -> LATIN CAPITAL LETTER O WITH TILDE
u'0' # 0xF0 -> DIGIT ZERO
u'1' # 0xF1 -> DIGIT ONE
u'2' # 0xF2 -> DIGIT TWO
u'3' # 0xF3 -> DIGIT THREE
u'4' # 0xF4 -> DIGIT FOUR
u'5' # 0xF5 -> DIGIT FIVE
u'6' # 0xF6 -> DIGIT SIX
u'7' # 0xF7 -> DIGIT SEVEN
u'8' # 0xF8 -> DIGIT EIGHT
u'9' # 0xF9 -> DIGIT NINE
u'\xb3' # 0xFA -> SUPERSCRIPT THREE
u'\xdb' # 0xFB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xdc' # 0xFC -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xd9' # 0xFD -> LATIN CAPITAL LETTER U WITH GRAVE
u'\xda' # 0xFE -> LATIN CAPITAL LETTER U WITH ACUTE
u'\x9f' # 0xFF -> CONTROL
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
|
apache-2.0
|
montefra/pyds9
|
pyds9/tests/test_pyds9.py
|
1
|
8747
|
from collections import Counter
import contextlib
import random
import subprocess as sp
import time
from astropy.io import fits
import numpy as np
import pytest
from pyds9 import pyds9
parametrize = pytest.mark.parametrize
type_mapping = parametrize('bitpix, dtype ',
[(8, np.dtype(np.uint8)),
(16, np.dtype(np.int16)),
(32, np.dtype(np.int32)),
(64, np.dtype(np.int64)),
(-32, np.dtype(np.float32)),
(-64, np.dtype(np.float64)),
(-16, np.dtype(np.uint16)),
pytest.mark.xfail(raises=ValueError,
reason='Wrong input')
((42, np.dtype(str)))
])
@pytest.fixture
def run_ds9s():
    '''Returns a context manager that accepts a list of names and runs a ds9
    instance for each name. On return from the yield, the instances are
    stopped.'''
@contextlib.contextmanager
def _run_ds9s(*names):
processes = []
for name in names:
cmd = ['ds9', '-title', name]
processes.append(sp.Popen(cmd))
# wait for all the ds9 to come alive
while True:
targets = pyds9.ds9_targets()
if targets and len(targets) == len(processes):
break
time.sleep(0.1)
try:
yield
finally:
errors = []
for p in processes:
returncode = p.poll()
if returncode is None:
p.kill()
p.communicate()
elif returncode != 0:
errors.append([cmd, returncode])
if errors:
msg = 'Command {} failed with error {}.'
msgs = [msg.format(' '.join(e[0]), e[1]) for e in errors]
raise RuntimeError('\n'.join(msgs))
return _run_ds9s
@pytest.fixture
def ds9_title(run_ds9s):
    '''Starts a ds9 instance in a subprocess and returns its title'''
name = 'test.{}'.format(random.randint(0, 10000))
with run_ds9s(name):
yield name
@pytest.fixture
def ds9_obj(ds9_title):
'''returns the DS9 instance for ``ds9_title``'''
return pyds9.ds9_openlist(target='*' + ds9_title + '*')[0]
@type_mapping
def test_bp2np(dtype, bitpix):
"""Test from bitpix to dtype"""
output = pyds9._bp2np(bitpix)
assert output == dtype
@type_mapping
def test_np2bp(dtype, bitpix):
"""Test from dtype to bitpix"""
output = pyds9._np2bp(dtype)
assert output == bitpix
def test_ds9_targets_empty():
'''If no ds9 instance is running, ds9_targets returns None'''
targets = pyds9.ds9_targets()
assert targets is None
def test_ds9_targets(run_ds9s):
'''ds9_targets returns open ds9 names'''
names = ['test1', 'test1', 'test2']
with run_ds9s(*names):
targets = pyds9.ds9_targets()
assert len(targets) == len(names)
names = Counter(names)
for name, count in names.items():
assert sum(name in t for t in targets) == count
@pytest.mark.xfail(raises=ValueError, reason='No target ds9 instance')
def test_ds9_openlist_empty():
'''If no ds9 instance is running, ds9_openlist raises an exception'''
pyds9.ds9_openlist()
def test_ds9_openlist(run_ds9s):
'''ds9_openlist returns running ds9 instances'''
names = ['test1', 'test1', 'test2']
with run_ds9s(*names):
ds9s = pyds9.ds9_openlist()
target_is_id = [ds9.target == ds9.id for ds9 in ds9s]
assert len(ds9s) == len(names)
assert sum(target_is_id) == 2
@parametrize('meth, n_warning',
[('get_fits', 0), ('get_pyfits', 1)])
def test_ds9_get_fits(monkeypatch, ds9_obj, test_fits, meth, n_warning):
'''get a fits file as an astropy fits object'''
monkeypatch.setitem(pyds9.ds9Globals, 'pyfits', False)
ds9_obj.set('file {}'.format(test_fits))
with pytest.warns(None) as warn_records:
hdul_from_ds9 = getattr(ds9_obj, meth)()
assert isinstance(hdul_from_ds9, fits.HDUList)
assert len(warn_records) == n_warning
diff = fits.FITSDiff(test_fits.strpath, hdul_from_ds9,
ignore_comments=['*', ])
assert diff.identical
@pytest.mark.xfail(raises=ValueError, reason='Not an astropy hdu')
def test_ds9_set_fits_fail(ds9_obj):
'''set_fits wants an astropy HDUList'''
ds9_obj.set_fits('random_type')
@parametrize('meth, n_warning',
[('set_fits', 0), ('set_pyfits', 1)])
def test_ds9_set_fits(monkeypatch, tmpdir, ds9_obj, test_fits,
meth, n_warning):
'''Set the astropy fits'''
monkeypatch.setitem(pyds9.ds9Globals, 'pyfits', False)
with fits.open(test_fits.strpath) as hdul,\
pytest.warns(None) as warn_records:
success = getattr(ds9_obj, meth)(hdul)
assert success == 1
assert len(warn_records) == n_warning
out_fits = tmpdir.join('out.fits')
with out_fits.open('w') as f:
sp.call(['xpaget', ds9_obj.target, 'fits'], stdout=f)
diff = fits.FITSDiff(test_fits.strpath, out_fits.strpath,
ignore_comments=['*', ])
assert diff.identical
def test_ds9_get_pyfits(ds9_obj, test_fits):
    'use pyfits to get fits'
pyfits = pytest.importorskip('pyfits', minversion='0.2')
ds9_obj.set('file {}'.format(test_fits))
with pytest.warns(None) as warn_records:
hdul_from_ds9 = ds9_obj.get_pyfits()
assert isinstance(hdul_from_ds9, pyfits.HDUList)
assert len(warn_records) == 0
diff = pyfits.FITSDiff(test_fits.strpath, hdul_from_ds9,
ignore_comments=['*', ])
assert diff.identical
@pytest.mark.xfail(raises=ValueError, reason='Not an astropy hdu')
def test_ds9_set_pyfits_fail(ds9_obj):
'''set_fits wants an astropy HDUList'''
pytest.importorskip('pyfits', minversion='0.2')
ds9_obj.set_pyfits('random_type')
def test_ds9_set_pyfits(tmpdir, ds9_obj, test_fits):
'''Set the astropy fits'''
pyfits = pytest.importorskip('pyfits', minversion='0.2')
with pyfits.open(test_fits.strpath) as hdul,\
pytest.warns(None) as warn_records:
success = ds9_obj.set_pyfits(hdul)
assert success == 1
assert len(warn_records) == 0
out_fits = tmpdir.join('out.fits')
with out_fits.open('w') as f:
sp.call(['xpaget', ds9_obj.target, 'fits'], stdout=f)
diff = pyfits.FITSDiff(test_fits.strpath, out_fits.strpath,
ignore_comments=['*', ])
assert diff.identical
fits_names = parametrize('fits_name', ['test.fits', 'test_3D.fits'])
@fits_names
def test_get_arr2np(ds9_obj, test_data_dir, fits_name):
'''Get the data on ds9 as a numpy array'''
fits_file = test_data_dir.join(fits_name)
ds9_obj.set('file {}'.format(fits_file))
arr = ds9_obj.get_arr2np()
fits_data = fits.getdata(fits_file.strpath)
np.testing.assert_array_equal(arr, fits_data)
@pytest.mark.xfail(raises=ValueError,
reason='Not a numpy array or not valid shape')
@parametrize('input_', ['random_type', np.arange(5)])
def test_ds9_set_np2arr_fail(tmpdir, ds9_obj, input_):
'''Set the passing wrong arrays'''
ds9_obj.set_np2arr(input_)
@fits_names
def test_ds9_set_np2arr(tmpdir, ds9_obj, test_data_dir, fits_name):
'''Set the astropy fits'''
fits_file = test_data_dir.join(fits_name)
fits_data = fits.getdata(fits_file.strpath)
success = ds9_obj.set_np2arr(fits_data)
assert success == 1
out_fits = tmpdir.join('out.fits')
with out_fits.open('w') as f:
sp.call(['xpaget', ds9_obj.target, 'fits'], stdout=f)
np.testing.assert_array_equal(fits_data, fits.getdata(out_fits.strpath))
@parametrize('action, args',
[(getattr, ()),
pytest.mark.xfail(raises=AttributeError,
reason='The attribute is readonly')
((setattr, (42, )))])
@parametrize('attr', ['target', 'id', 'method'])
def test_ds9_readonly_props(ds9_obj, action, args, attr):
'''Make sure that readonly attributes are such'''
action(ds9_obj, attr, *args)
def test_ds9_extra_prop(ds9_title):
'''Regression test to make sure that issues like #34 don't happen
anymore'''
class DS9_(pyds9.DS9):
@property
def frame(self):
return self.get("frame")
@frame.setter
def frame(self, value):
self.set("frame {}".format(value))
ds9 = DS9_(target='*' + ds9_title + '*')
a = ds9.frame
ds9.frame = int(a) + 1
|
lgpl-2.1
|
kimw/shadowsocks
|
shadowsocks/crypto/util.py
|
16
|
5004
|
#!/usr/bin/env python
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import os
import logging
def find_library_nt(name):
# modified from ctypes.util
    # ctypes.util.find_library just returns the first result it finds,
    # but we want to try them all
    # because on Windows, users may have both 32-bit and 64-bit versions installed
import glob
results = []
for directory in os.environ['PATH'].split(os.pathsep):
fname = os.path.join(directory, name)
if os.path.isfile(fname):
results.append(fname)
if fname.lower().endswith(".dll"):
continue
fname += "*.dll"
files = glob.glob(fname)
if files:
results.extend(files)
return results
def load_library(path, search_symbol, library_name):
from ctypes import CDLL
try:
lib = CDLL(path)
if hasattr(lib, search_symbol):
logging.info('loading %s from %s', library_name, path)
return lib
else:
logging.warn('can\'t find symbol %s in %s', search_symbol,
path)
except Exception:
pass
return None
def find_library(possible_lib_names, search_symbol, library_name,
custom_path=None):
import ctypes.util
if custom_path:
return load_library(custom_path, search_symbol, library_name)
paths = []
if type(possible_lib_names) not in (list, tuple):
possible_lib_names = [possible_lib_names]
lib_names = []
for lib_name in possible_lib_names:
lib_names.append(lib_name)
lib_names.append('lib' + lib_name)
for name in lib_names:
if os.name == "nt":
paths.extend(find_library_nt(name))
else:
path = ctypes.util.find_library(name)
if path:
paths.append(path)
if not paths:
# We may get here when find_library fails because, for example,
# the user does not have sufficient privileges to access those
# tools underlying find_library on linux.
import glob
for name in lib_names:
patterns = [
'/usr/local/lib*/lib%s.*' % name,
'/usr/lib*/lib%s.*' % name,
'lib%s.*' % name,
'%s.dll' % name]
for pat in patterns:
files = glob.glob(pat)
if files:
paths.extend(files)
for path in paths:
lib = load_library(path, search_symbol, library_name)
if lib:
return lib
return None
def parse_mode(cipher_nme):
"""
Parse the cipher mode from cipher name
e.g. aes-128-gcm, the mode is gcm
:param cipher_nme: str cipher name, aes-128-cfb, aes-128-gcm ...
:return: str/None The mode, cfb, gcm ...
"""
hyphen = cipher_nme.rfind('-')
if hyphen > 0:
return cipher_nme[hyphen:]
return None
def run_cipher(cipher, decipher):
from os import urandom
import random
import time
block_size = 16384
rounds = 1 * 1024
plain = urandom(block_size * rounds)
cipher_results = []
pos = 0
print('test start')
start = time.time()
while pos < len(plain):
l = random.randint(100, 32768)
# print(pos, l)
c = cipher.encrypt_once(plain[pos:pos + l])
cipher_results.append(c)
pos += l
pos = 0
# c = b''.join(cipher_results)
plain_results = []
for c in cipher_results:
# l = random.randint(100, 32768)
l = len(c)
plain_results.append(decipher.decrypt_once(c))
pos += l
end = time.time()
print('speed: %d bytes/s' % (block_size * rounds / (end - start)))
assert b''.join(plain_results) == plain
def test_find_library():
assert find_library('c', 'strcpy', 'libc') is not None
assert find_library(['c'], 'strcpy', 'libc') is not None
assert find_library(('c',), 'strcpy', 'libc') is not None
assert find_library(('crypto', 'eay32'), 'EVP_CipherUpdate',
'libcrypto') is not None
assert find_library('notexist', 'strcpy', 'libnotexist') is None
assert find_library('c', 'symbol_not_exist', 'c') is None
assert find_library(('notexist', 'c', 'crypto', 'eay32'),
'EVP_CipherUpdate', 'libc') is not None
if __name__ == '__main__':
test_find_library()
|
apache-2.0
|
danluu/BitFunnel
|
NativeJIT/googletest/googletest/test/gtest_output_test.py
|
363
|
12259
|
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests the text output of Google C++ Testing Framework.
SYNOPSIS
gtest_output_test.py --build_dir=BUILD/DIR --gengolden
# where BUILD/DIR contains the built gtest_output_test_ file.
gtest_output_test.py --gengolden
gtest_output_test.py
"""
__author__ = '[email protected] (Zhanyong Wan)'
import difflib
import os
import re
import sys
import gtest_test_utils
# The flag for generating the golden file
GENGOLDEN_FLAG = '--gengolden'
CATCH_EXCEPTIONS_ENV_VAR_NAME = 'GTEST_CATCH_EXCEPTIONS'
IS_WINDOWS = os.name == 'nt'
# TODO([email protected]): remove the _lin suffix.
GOLDEN_NAME = 'gtest_output_test_golden_lin.txt'
PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath('gtest_output_test_')
# At least one command we exercise must not have the
# 'internal_skip_environment_and_ad_hoc_tests' argument.
COMMAND_LIST_TESTS = ({}, [PROGRAM_PATH, '--gtest_list_tests'])
COMMAND_WITH_COLOR = ({}, [PROGRAM_PATH, '--gtest_color=yes'])
COMMAND_WITH_TIME = ({}, [PROGRAM_PATH,
'--gtest_print_time',
'internal_skip_environment_and_ad_hoc_tests',
'--gtest_filter=FatalFailureTest.*:LoggingTest.*'])
COMMAND_WITH_DISABLED = (
{}, [PROGRAM_PATH,
'--gtest_also_run_disabled_tests',
'internal_skip_environment_and_ad_hoc_tests',
'--gtest_filter=*DISABLED_*'])
COMMAND_WITH_SHARDING = (
{'GTEST_SHARD_INDEX': '1', 'GTEST_TOTAL_SHARDS': '2'},
[PROGRAM_PATH,
'internal_skip_environment_and_ad_hoc_tests',
'--gtest_filter=PassingTest.*'])
GOLDEN_PATH = os.path.join(gtest_test_utils.GetSourceDir(), GOLDEN_NAME)
def ToUnixLineEnding(s):
"""Changes all Windows/Mac line endings in s to UNIX line endings."""
return s.replace('\r\n', '\n').replace('\r', '\n')
def RemoveLocations(test_output):
"""Removes all file location info from a Google Test program's output.
Args:
test_output: the output of a Google Test program.
Returns:
output with all file location info (in the form of
'DIRECTORY/FILE_NAME:LINE_NUMBER: ' or
'DIRECTORY\\FILE_NAME(LINE_NUMBER): ') replaced by
'FILE_NAME:#: '.
"""
return re.sub(r'.*[/\\](.+)(\:\d+|\(\d+\))\: ', r'\1:#: ', test_output)
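# Illustrative sketch, not part of the original test: the kind of rewrite
# RemoveLocations performs on a made-up failure line.
# 'src/foo/gtest_output_test_.cc:123: Failure' -> 'gtest_output_test_.cc:#: Failure'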
def RemoveStackTraceDetails(output):
"""Removes all stack traces from a Google Test program's output."""
# *? means "find the shortest string that matches".
return re.sub(r'Stack trace:(.|\n)*?\n\n',
'Stack trace: (omitted)\n\n', output)
def RemoveStackTraces(output):
"""Removes all traces of stack traces from a Google Test program's output."""
# *? means "find the shortest string that matches".
return re.sub(r'Stack trace:(.|\n)*?\n\n', '', output)
def RemoveTime(output):
"""Removes all time information from a Google Test program's output."""
return re.sub(r'\(\d+ ms', '(? ms', output)
def RemoveTypeInfoDetails(test_output):
"""Removes compiler-specific type info from Google Test program's output.
Args:
test_output: the output of a Google Test program.
Returns:
output with type information normalized to canonical form.
"""
# some compilers output the name of type 'unsigned int' as 'unsigned'
return re.sub(r'unsigned int', 'unsigned', test_output)
def NormalizeToCurrentPlatform(test_output):
"""Normalizes platform specific output details for easier comparison."""
if IS_WINDOWS:
# Removes the color information that is not present on Windows.
test_output = re.sub('\x1b\\[(0;3\d)?m', '', test_output)
# Changes failure message headers into the Windows format.
test_output = re.sub(r': Failure\n', r': error: ', test_output)
# Changes file(line_number) to file:line_number.
test_output = re.sub(r'((\w|\.)+)\((\d+)\):', r'\1:\3:', test_output)
return test_output
def RemoveTestCounts(output):
"""Removes test counts from a Google Test program's output."""
output = re.sub(r'\d+ tests?, listed below',
'? tests, listed below', output)
output = re.sub(r'\d+ FAILED TESTS',
'? FAILED TESTS', output)
output = re.sub(r'\d+ tests? from \d+ test cases?',
'? tests from ? test cases', output)
output = re.sub(r'\d+ tests? from ([a-zA-Z_])',
r'? tests from \1', output)
return re.sub(r'\d+ tests?\.', '? tests.', output)
def RemoveMatchingTests(test_output, pattern):
"""Removes output of specified tests from a Google Test program's output.
This function strips not only the beginning and the end of a test but also
all output in between.
Args:
test_output: A string containing the test output.
pattern: A regex string that matches names of test cases or
tests to remove.
Returns:
Contents of test_output with tests whose names match pattern removed.
"""
test_output = re.sub(
r'.*\[ RUN \] .*%s(.|\n)*?\[( FAILED | OK )\] .*%s.*\n' % (
pattern, pattern),
'',
test_output)
return re.sub(r'.*%s.*\n' % pattern, '', test_output)
def NormalizeOutput(output):
"""Normalizes output (the output of gtest_output_test_.exe)."""
output = ToUnixLineEnding(output)
output = RemoveLocations(output)
output = RemoveStackTraceDetails(output)
output = RemoveTime(output)
return output
def GetShellCommandOutput(env_cmd):
"""Runs a command in a sub-process, and returns its output in a string.
Args:
env_cmd: The shell command. A 2-tuple where element 0 is a dict of extra
environment variables to set, and element 1 is a string with
the command and any flags.
Returns:
A string with the command's combined standard and diagnostic output.
"""
# Spawns cmd in a sub-process, and gets its standard I/O file objects.
# Set and save the environment properly.
environ = os.environ.copy()
environ.update(env_cmd[0])
p = gtest_test_utils.Subprocess(env_cmd[1], env=environ)
return p.output
def GetCommandOutput(env_cmd):
"""Runs a command and returns its output with all file location
info stripped off.
Args:
env_cmd: The shell command. A 2-tuple where element 0 is a dict of extra
environment variables to set, and element 1 is a string with
the command and any flags.
"""
# Disables exception pop-ups on Windows.
environ, cmdline = env_cmd
environ = dict(environ) # Ensures we are modifying a copy.
environ[CATCH_EXCEPTIONS_ENV_VAR_NAME] = '1'
return NormalizeOutput(GetShellCommandOutput((environ, cmdline)))
def GetOutputOfAllCommands():
"""Returns concatenated output from several representative commands."""
return (GetCommandOutput(COMMAND_WITH_COLOR) +
GetCommandOutput(COMMAND_WITH_TIME) +
GetCommandOutput(COMMAND_WITH_DISABLED) +
GetCommandOutput(COMMAND_WITH_SHARDING))
test_list = GetShellCommandOutput(COMMAND_LIST_TESTS)
SUPPORTS_DEATH_TESTS = 'DeathTest' in test_list
SUPPORTS_TYPED_TESTS = 'TypedTest' in test_list
SUPPORTS_THREADS = 'ExpectFailureWithThreadsTest' in test_list
SUPPORTS_STACK_TRACES = False
CAN_GENERATE_GOLDEN_FILE = (SUPPORTS_DEATH_TESTS and
SUPPORTS_TYPED_TESTS and
SUPPORTS_THREADS and
not IS_WINDOWS)
class GTestOutputTest(gtest_test_utils.TestCase):
def RemoveUnsupportedTests(self, test_output):
if not SUPPORTS_DEATH_TESTS:
test_output = RemoveMatchingTests(test_output, 'DeathTest')
if not SUPPORTS_TYPED_TESTS:
test_output = RemoveMatchingTests(test_output, 'TypedTest')
test_output = RemoveMatchingTests(test_output, 'TypedDeathTest')
test_output = RemoveMatchingTests(test_output, 'TypeParamDeathTest')
if not SUPPORTS_THREADS:
test_output = RemoveMatchingTests(test_output,
'ExpectFailureWithThreadsTest')
test_output = RemoveMatchingTests(test_output,
'ScopedFakeTestPartResultReporterTest')
test_output = RemoveMatchingTests(test_output,
'WorksConcurrently')
if not SUPPORTS_STACK_TRACES:
test_output = RemoveStackTraces(test_output)
return test_output
def testOutput(self):
output = GetOutputOfAllCommands()
golden_file = open(GOLDEN_PATH, 'r')
# A mis-configured source control system can cause \r to appear in EOL
# sequences when we read the golden file, irrespective of the operating
# system used. Therefore, we need to strip those \r's from newlines
# unconditionally.
golden = ToUnixLineEnding(golden_file.read())
golden_file.close()
# We want the test to pass regardless of certain features being
# supported or not.
# We still have to remove type name specifics in all cases.
normalized_actual = RemoveTypeInfoDetails(output)
normalized_golden = RemoveTypeInfoDetails(golden)
if CAN_GENERATE_GOLDEN_FILE:
self.assertEqual(normalized_golden, normalized_actual,
'\n'.join(difflib.unified_diff(
normalized_golden.split('\n'),
normalized_actual.split('\n'),
'golden', 'actual')))
else:
normalized_actual = NormalizeToCurrentPlatform(
RemoveTestCounts(normalized_actual))
normalized_golden = NormalizeToCurrentPlatform(
RemoveTestCounts(self.RemoveUnsupportedTests(normalized_golden)))
# This code is very handy when debugging golden file differences:
if os.getenv('DEBUG_GTEST_OUTPUT_TEST'):
open(os.path.join(
gtest_test_utils.GetSourceDir(),
'_gtest_output_test_normalized_actual.txt'), 'wb').write(
normalized_actual)
open(os.path.join(
gtest_test_utils.GetSourceDir(),
'_gtest_output_test_normalized_golden.txt'), 'wb').write(
normalized_golden)
self.assertEqual(normalized_golden, normalized_actual)
if __name__ == '__main__':
if sys.argv[1:] == [GENGOLDEN_FLAG]:
if CAN_GENERATE_GOLDEN_FILE:
output = GetOutputOfAllCommands()
golden_file = open(GOLDEN_PATH, 'wb')
golden_file.write(output)
golden_file.close()
else:
message = (
"""Unable to write a golden file when compiled in an environment
that does not support all the required features (death tests, typed tests,
and multiple threads). Please generate the golden file using a binary built
with those features enabled.""")
sys.stderr.write(message)
sys.exit(1)
else:
gtest_test_utils.Main()
|
mit
|
markrawlingson/SickRage
|
lib/chardet/utf8prober.py
|
52
|
2709
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetprober import CharSetProber
from .enums import ProbingState, MachineState
from .codingstatemachine import CodingStateMachine
from .mbcssm import UTF8_SM_MODEL
class UTF8Prober(CharSetProber):
ONE_CHAR_PROB = 0.5
def __init__(self):
super(UTF8Prober, self).__init__()
self.coding_sm = CodingStateMachine(UTF8_SM_MODEL)
self._num_mb_chars = None
self.reset()
def reset(self):
super(UTF8Prober, self).reset()
self.coding_sm.reset()
self._num_mb_chars = 0
@property
def charset_name(self):
return "utf-8"
def feed(self, byte_str):
for c in byte_str:
coding_state = self.coding_sm.next_state(c)
if coding_state == MachineState.error:
self._state = ProbingState.not_me
break
elif coding_state == MachineState.its_me:
self._state = ProbingState.found_it
break
elif coding_state == MachineState.start:
if self.coding_sm.get_current_charlen() >= 2:
self._num_mb_chars += 1
if self.state == ProbingState.detecting:
if self.get_confidence() > self.SHORTCUT_THRESHOLD:
self._state = ProbingState.found_it
return self.state
def get_confidence(self):
unlike = 0.99
if self._num_mb_chars < 6:
unlike *= self.ONE_CHAR_PROB ** self._num_mb_chars
return 1.0 - unlike
else:
return unlike
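# Illustrative usage sketch, not part of the original module (assumes the
# surrounding chardet package is importable):
# prober = UTF8Prober()
# prober.feed(b'\xe4\xbd\xa0\xe5\xa5\xbd')  # valid UTF-8 multi-byte input
# print(prober.charset_name, prober.state, prober.get_confidence())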
|
gpl-3.0
|
crysthianophp/el-shaddai
|
vendor/doctrine/orm/docs/en/_exts/configurationblock.py
|
2577
|
3506
|
#Copyright (c) 2010 Fabien Potencier
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
from docutils.parsers.rst import Directive, directives
from docutils import nodes
from string import upper
class configurationblock(nodes.General, nodes.Element):
pass
class ConfigurationBlock(Directive):
has_content = True
required_arguments = 0
optional_arguments = 0
final_argument_whitespace = True
option_spec = {}
formats = {
'html': 'HTML',
'xml': 'XML',
'php': 'PHP',
'yaml': 'YAML',
'jinja': 'Twig',
'html+jinja': 'Twig',
'jinja+html': 'Twig',
'php+html': 'PHP',
'html+php': 'PHP',
'ini': 'INI',
'php-annotations': 'Annotations',
}
def run(self):
env = self.state.document.settings.env
node = nodes.Element()
node.document = self.state.document
self.state.nested_parse(self.content, self.content_offset, node)
entries = []
for i, child in enumerate(node):
if isinstance(child, nodes.literal_block):
# add a title (the language name) before each block
#targetid = "configuration-block-%d" % env.new_serialno('configuration-block')
#targetnode = nodes.target('', '', ids=[targetid])
#targetnode.append(child)
innernode = nodes.emphasis(self.formats[child['language']], self.formats[child['language']])
para = nodes.paragraph()
para += [innernode, child]
entry = nodes.list_item('')
entry.append(para)
entries.append(entry)
resultnode = configurationblock()
resultnode.append(nodes.bullet_list('', *entries))
return [resultnode]
def visit_configurationblock_html(self, node):
self.body.append(self.starttag(node, 'div', CLASS='configuration-block'))
def depart_configurationblock_html(self, node):
self.body.append('</div>\n')
def visit_configurationblock_latex(self, node):
pass
def depart_configurationblock_latex(self, node):
pass
def setup(app):
app.add_node(configurationblock,
html=(visit_configurationblock_html, depart_configurationblock_html),
latex=(visit_configurationblock_latex, depart_configurationblock_latex))
app.add_directive('configuration-block', ConfigurationBlock)
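# Illustrative usage sketch, not part of the original extension: once loaded
# via conf.py, the directive groups per-format snippets in reStructuredText;
# the YAML/XML content below is made up.
# .. configuration-block::
#
#     .. code-block:: yaml
#
#         orm:
#             auto_mapping: true
#
#     .. code-block:: xml
#
#         <doctrine:config />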
|
bsd-3-clause
|
madazone/fabric-bolt
|
src/fabric_bolt/core/settings/base.py
|
14
|
6914
|
# Global settings for core project.
import os
########## PATH CONFIGURATION
PROJECT_DIR = os.path.dirname(os.path.dirname(__file__))
PUBLIC_DIR = os.path.join(PROJECT_DIR, 'public')
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'fabric_bolt.core.wsgi.application'
########## END PATH CONFIGURATION
########## DEBUG CONFIGURATION
DEBUG = False
TEMPLATE_DEBUG = True
########## END DEBUG CONFIGURATION
########## MANAGER CONFIGURATION
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
########## END MANAGER CONFIGURATION
########## URL CONFIGURATION
ROOT_URLCONF = 'fabric_bolt.core.urls'
########## END URL CONFIGURATION
########## GENERAL CONFIGURATION
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'UTC'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Location of fixtures for the project
FIXTURE_DIRS = (
os.path.join(PROJECT_DIR, 'fixtures'),
)
########## END GENERAL CONFIGURATION
########## MEDIA CONFIGURATION
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(PUBLIC_DIR, 'media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/media/'
########## END MEDIA CONFIGURATION
########## STATIC FILE CONFIGURATION
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(PUBLIC_DIR, 'static')
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(PROJECT_DIR, 'static'),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
########## END STATIC FILE CONFIGURATION
########## TEMPLATE CONFIGURATION
GRAPPELLI_ADMIN_TITLE = 'Admin'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(PROJECT_DIR, 'templates'),
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.request',
'fabric_bolt.core.context_processors.sidebar_lists',
'sekizai.context_processors.sekizai',
)
########## END TEMPLATE CONFIGURATION
########## MIDDLEWARE CONFIGURATION
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'stronghold.middleware.LoginRequiredMiddleware',
)
########## END MIDDLEWARE CONFIGURATION
########## APP CONFIGURATION
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize',
'grappelli',
'django.contrib.admin',
'south',
'sekizai',
'crispy_forms',
'stronghold',
'django_tables2',
'fabric_bolt.accounts',
'fabric_bolt.hosts',
'fabric_bolt.projects',
)
########## END APP CONFIGURATION
FABFILE_PATH = os.path.join(os.path.dirname(PROJECT_DIR), 'fabfile.py')
########## STRONGHOLD CONFIGURATION
LOGIN_URL = '/login/'
STRONGHOLD_PUBLIC_URLS = (
'^/login/',
'^/logout/',
'^/reset/.+/$',
)
########## END STRONGHOLD CONFIGURATION
########## CRISPY CONFIGURATION
CRISPY_TEMPLATE_PACK = "bootstrap3"
########## END CRISPY CONFIGURATION
########## AUTH USER CONFIGURATION
AUTH_USER_MODEL = 'accounts.DeployUser'
########## END AUTH USER CONFIGURATION
########## EMAIL CONFIGURATION
# EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'localhost'
EMAIL_PORT = 25
EMAIL_USE_TLS = False
########## END EMAIL CONFIGURATION
########## LOGGING CONFIGURATION
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
########## END LOGGING CONFIGURATION
|
mit
|
korobool/nlp4go
|
test_data/prepare_data.py
|
2
|
1589
|
import json
from nltk.tokenize import TreebankWordTokenizer
def read_referrence_data(filename):
with open(filename, "r") as f:
lines = f.readlines()
return [l.strip('\n\r') for l in lines]
def span_tokenize(text):
tokens = TreebankWordTokenizer().tokenize(text)
dt = 0
end = 0
spaned_tokens = []
for token in tokens:
raw_token = token
is_quote_end = False
is_quote_start = False
is_ellipsis = False
if token == '``':
raw_token = '"'
is_quote_start = True
elif token == "''":
raw_token = '"'
is_quote_end = True
elif token == "...":
is_ellipsis = True
start = text[dt:].find(raw_token)
if start != -1:
end = start + len(raw_token)
spaned_tokens.append({
"word": token,
"runes": [ord(c) for c in token],
"pos": dt+start,
"pos_end": dt+end,
"is_quote_start": is_quote_start,
"is_quote_end": is_quote_end,
"is_ellipsis": is_ellipsis,
})
dt += end
return spaned_tokens
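# Illustrative sketch, not part of the original script: the record produced
# for the first token of a short, made-up sentence.
# span_tokenize('Hello, world.')[0] ==
# {"word": "Hello", "runes": [72, 101, 108, 108, 111], "pos": 0, "pos_end": 5,
#  "is_quote_start": False, "is_quote_end": False, "is_ellipsis": False}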
if __name__ == "__main__":
ref_sentences = read_referrence_data("sentences.en.txt")
with open('sentences.en.json', 'w') as outfile:
sent =[]
for sentence in ref_sentences:
sent.append({
"sentence": sentence,
"tokens": span_tokenize(sentence),
})
json.dump(sent, outfile, indent=4)
|
mit
|
zang-cloud/zang-python
|
zang/connectors/carrier_services_connector.py
|
2
|
5234
|
# -*- coding: utf-8 -*-
"""
zang.connectors.carrier_services_connector
~~~~~~~~~~~~~~~~~~~
Module for communication with `Carrier` endpoint
"""
from zang.connectors.base_connector import BaseConnector
from zang.helpers.helpers import flatDict
from zang.domain.carrier_lookup import CarrierLookup
from zang.domain.list.carrier_lookups import CarrierLookups
from zang.domain.cnam_lookup import CnamLookup
from zang.domain.list.cnam_lookups import CnamLookups
from zang.domain.bna_lookup import BnaLookup
from zang.domain.list.bna_lookups import BnaLookups
class CarrierServicesConnector(BaseConnector):
"""
Used for all forms of communication with the `Lookups`
endpoint of the Zang REST API.
.. seealso:: zang.connectors.connector_factory.ConnectorFactory
"""
def viewCarrierLookup(self, phoneNumber):
"""
The Carrier Lookup API allows you to retrieve additional information
about a phone number.
:param phoneNumber: Phone numbers to do a lookup for.
:type phoneNumber: str
:return: `CarrierLookup` object
:rtype: zang.domain.carrier_lookup.CarrierLookup
:raises ZangException:
"""
bodyParams = {
'PhoneNumber': phoneNumber,
}
data = flatDict(bodyParams)
carrierLookup = self._executor.update(
('Lookups', 'Carrier'), CarrierLookup, data)
return carrierLookup
def listCarrierLookups(self, page=None, pageSize=None):
"""
Shows info on all carrier lookups associated with some account
:param page: (optional) Used to return a particular page within the
list.
:param pageSize: (optional) Used to specify the amount of list items
to return per page.
:type page: int
:type pageSize: int
:return: `CarrierLookups` object
:rtype: zang.domain.list.carrier_lookups.CarrierLookups
:raises ZangException:
"""
queryParams = {
'Page': page,
'PageSize': pageSize,
}
params = flatDict(queryParams)
carrierLookups = self._executor.read(
('Lookups', 'Carrier'), CarrierLookups, params)
return carrierLookups
def viewCnamLookup(self, phoneNumber):
"""
Shows a CNAM information on some phone number
:param phoneNumber: The number of the phone you are attempting to
perform the CNAM lookup on. Multiple PhoneNumbers to lookup can
be specified in a single request.
:type phoneNumber: str
:return: `CnamLookup` object
:rtype: zang.domain.cnam_lookup.CnamLookup
:raises ZangException:
"""
bodyParams = {
'PhoneNumber': phoneNumber,
}
data = flatDict(bodyParams)
cnamLookup = self._executor.update(
('Lookups', 'Cnam'), CnamLookup, data)
return cnamLookup
def listCnamLookups(self, page=None, pageSize=None):
"""
Shows info on all CNAM lookups associated with some account
:param page: (optional) Used to return a particular page within the
list.
:param pageSize: (optional) Used to specify the amount of list items
to return per page.
:type page: int
:type pageSize: int
:return: `CnamLookups` object
:rtype: zang.domain.list.cnam_lookups.CnamLookups
:raises ZangException:
"""
queryParams = {
'Page': page,
'PageSize': pageSize,
}
params = flatDict(queryParams)
cnamLookups = self._executor.read(
('Lookups', 'Cnam'), CnamLookups, params)
return cnamLookups
def viewBnaLookup(self, phoneNumber):
"""
Shows information on billing name address for some phone number.
:param phoneNumber: The number of the phone you are attempting to
perform the BNA lookup on. Multiple PhoneNumbers to lookup can be
specified in a single request.
:type phoneNumber: str
:return: `BnaLookup` object
:rtype: zang.domain.bna_lookup.BnaLookup
:raises ZangException:
"""
bodyParams = {
'PhoneNumber': phoneNumber,
}
data = flatDict(bodyParams)
bnaLookup = self._executor.update(
('Lookups', 'Bna'), BnaLookup, data)
return bnaLookup
def listBnaLookups(self, page=None, pageSize=None):
"""
Shows info on all BNA lookups associated with some account.
:param page: (optional) Used to return a particular page within the
list.
:param pageSize: (optional) Used to specify the amount of list items
to return per page.
:type page: int
:type pageSize: int
:return: `BnaLookups` object
:rtype: zang.domain.list.bna_lookups.BnaLookups
:raises ZangException:
"""
queryParams = {
'Page': page,
'PageSize': pageSize,
}
params = flatDict(queryParams)
bnaLookups = self._executor.read(
('Lookups', 'Bna'), BnaLookups, params)
return bnaLookups
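# Illustrative usage sketch, not part of the original module. The factory
# attribute name and credentials below are assumptions based on the seealso
# reference above, not a confirmed API:
# from zang.connectors.connector_factory import ConnectorFactory
# connector = ConnectorFactory(sid, auth_token).carrierServicesConnector
# lookup = connector.viewCarrierLookup('+15551234567')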
|
mit
|
uci-cbcl/tree-hmm
|
setup.py
|
2
|
4290
|
import sys
from distutils.core import setup
from distutils.extension import Extension
try:
from Cython.Distutils import build_ext
except ImportError:
sys.stderr.write("""
==================================================
Please install Cython (http://cython.org/),
which is required to build tree-hmm. Usually
you can do:
pip install -U cython
or
easy_install -U cython
==================================================
""")
sys.exit(1)
try:
import numpy
except ImportError:
sys.stderr.write("""
==================================================
Please install numpy,
which is required to build tree-hmm. Usually
you can do:
pip install -U numpy
or
easy_install -U numpy
==================================================
""")
sys.exit(1)
ext_modules = [Extension("treehmm.vb_mf", ["treehmm/vb_mf.pyx"],
#extra_compile_args=['-fopenmp', '-march=bdver1', '-mtune=bdver1', '-Ofast'],
#extra_link_args=['-fopenmp', '-march=bdver1', '-mtune=bdver1', '-Ofast'],
# extra_compile_args=['-fopenmp', '-I/data/apps/enthought_python/7.3.2/lib/python2.7/site-packages/numpy/core/include', '-L/data/apps/enthought_python/7.3.2/lib/'],
# extra_link_args=['-fopenmp', '-L/data/apps/enthought_python/7.3.2/lib/'],
include_dirs=[numpy.get_include()]),
Extension("treehmm.vb_prodc", ["treehmm/vb_prodc.pyx"],
#extra_compile_args=['-fopenmp', '-march=bdver1', '-mtune=bdver1', '-Ofast'],
#extra_link_args=['-fopenmp', '-march=bdver1', '-mtune=bdver1', '-Ofast'],
# extra_compile_args=['-fopenmp', '-I/data/apps/enthought_python/7.3.2/lib/python2.7/site-packages/numpy/core/include', '-L/data/apps/enthought_python/7.3.2/lib/'],
# extra_link_args=['-fopenmp', '-L/data/apps/enthought_python/7.3.2/lib/'],
include_dirs=[numpy.get_include()]),
Extension("treehmm.vb_prodc_sepTheta", ["treehmm/vb_prodc_sepTheta.pyx"],
#extra_compile_args=['-fopenmp', '-march=bdver1', '-mtune=bdver1', '-Ofast'],
#extra_link_args=['-fopenmp', '-march=bdver1', '-mtune=bdver1', '-Ofast'],
# extra_compile_args=['-fopenmp', '-I/data/apps/enthought_python/7.3.2/lib/python2.7/site-packages/numpy/core/include', '-L/data/apps/enthought_python/7.3.2/lib/'],
# extra_link_args=['-fopenmp', '-L/data/apps/enthought_python/7.3.2/lib/'],
include_dirs=[numpy.get_include()]),
Extension("treehmm.clique_hmm", ["treehmm/vb_clique.pyx"],
#extra_compile_args=['-fopenmp', '-march=bdver1', '-mtune=bdver1', '-Ofast'],
#extra_link_args=['-fopenmp', '-march=bdver1', '-mtune=bdver1', '-Ofast'],
# extra_compile_args=['-fopenmp', '-I/data/apps/enthought_python/7.3.2/lib/python2.7/site-packages/numpy/core/include', '-L/data/apps/enthought_python/7.3.2/lib/'],
# extra_link_args=['-fopenmp', '-L/data/apps/enthought_python/7.3.2/lib/'],
include_dirs=[numpy.get_include()]),
]
install_requires = ['scipy', 'cython ( >= 0.15.1)']
if sys.version_info[:2] < (2, 7):
install_requires += ['argparse']
setup(
name = 'treehmm',
description = 'Variational Inference for tree-structured Hidden-Markov Models',
version = '0.1.0',
author = 'Jake Biesinger, Yuanfeng Wang, Xiaohui Xie',
author_email = '[email protected]',
url = 'https://github.com/uci-cbcl/tree-hmm',
long_description = open('README.rst').read(),
packages = ['treehmm', 'gmtkParam'],
scripts = ['bin/tree-hmm'],
cmdclass = {'build_ext': build_ext},
ext_modules = ext_modules,
requires = ['scipy', 'cython ( >= 0.15.1)', 'numpy'],
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Topic :: Scientific/Engineering :: Bio-Informatics'],
)
|
bsd-3-clause
|
K-Carrington/ardupilot
|
Tools/scripts/frame_sizes.py
|
351
|
1117
|
#!/usr/bin/env python
import re, sys, operator, os
code_line = re.compile("^\s*\d+:/")
frame_line = re.compile("^\s*\d+\s+/\* frame size = (\d+) \*/")
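# Illustrative, made-up examples of the compiler .lst listing lines these
# patterns are meant to match; not part of the original script:
# code_line matches e.g. ' 1234:/ArduPlane/ArduPlane.cpp ****'
# frame_line matches e.g. ' 1235 /* frame size = 64 */' and captures '64'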
class frame(object):
def __init__(self, code, frame_size):
self.code = code
self.frame_size = int(frame_size)
frames = []
def process_lst(filename):
'''process one lst file'''
last_code = ''
h = open(filename, mode='r')
for line in h:
if code_line.match(line):
last_code = line.strip()
elif frame_line.match(line):
frames.append(frame(last_code, frame_line.match(line).group(1)))
h.close()
if len(sys.argv) > 1:
dname = sys.argv[1]
else:
dname = '.'
for root, dirs, files in os.walk(dname):
for f in files:
if f.endswith(".lst"):
process_lst(os.path.join(root, f))
sorted_frames = sorted(frames,
key=operator.attrgetter('frame_size'),
reverse=True)
print("FrameSize Code")
for frame in sorted_frames:
if frame.frame_size > 0:
print("%9u %s" % (frame.frame_size, frame.code))
|
gpl-3.0
|
tanmaykm/edx-platform
|
common/djangoapps/dark_lang/views.py
|
6
|
6175
|
"""
Views file for the Darklang Django App
"""
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.utils.translation import LANGUAGE_SESSION_KEY
from django.utils.translation import ugettext as _
from django.views.generic.base import View
from openedx.core.djangoapps.user_api.preferences.api import (
delete_user_preference, get_user_preference, set_user_preference
)
from openedx.core.lib.api.view_utils import view_auth_classes
from dark_lang import DARK_LANGUAGE_KEY
from dark_lang.models import DarkLangConfig
from edxmako.shortcuts import render_to_response
from lang_pref import LANGUAGE_KEY
LANGUAGE_INPUT_FIELD = 'preview_lang'
@view_auth_classes()
class DarkLangView(View):
"""
View used when a user is attempting to change the preview language using Darklang.
Expected Behavior:
GET - returns a form for setting/resetting the user's dark language
POST - updates or clears the setting to the given dark language
"""
template_name = 'darklang/preview_lang.html'
@method_decorator(login_required)
def get(self, request):
"""
Returns the Form for setting/resetting a User's dark language setting
Arguments:
request (Request): The Django Request Object
Returns:
HttpResponse: View containing the form for setting the preview lang
"""
context = {
'disable_courseware_js': True,
'uses_pattern_library': True
}
return render_to_response(self.template_name, context)
@method_decorator(login_required)
def post(self, request):
"""
Sets or clears the DarkLang depending on the incoming post data.
Arguments:
request (Request): The Django Request Object
Returns:
HttpResponse: View containing the form for setting the preview lang with the status
included in the context
"""
return self.process_darklang_request(request)
def process_darklang_request(self, request):
"""
Process the request to set or clear the DarkLang depending on the incoming request.
Arguments:
request (Request): The Django Request Object
Returns:
HttpResponse: View containing the form for setting the preview lang with the status
included in the context
"""
context = {
'disable_courseware_js': True,
'uses_pattern_library': True
}
response = None
if not DarkLangConfig.current().enabled:
message = _('Preview Language is currently disabled')
context.update({'form_submit_message': message})
context.update({'success': False})
response = render_to_response(self.template_name, context, request=request)
elif 'set_language' in request.POST:
# Set the Preview Language
response = self._set_preview_language(request, context)
elif 'reset' in request.POST:
# Reset and clear the language preference
response = self._clear_preview_language(request, context)
return response
def _set_preview_language(self, request, context):
"""
Set the Preview language
Arguments:
request (Request): The incoming Django Request
context dict: The basic context for the Response
Returns:
HttpResponse: View containing the form for setting the preview lang with the status
included in the context
"""
message = None
show_refresh_message = False
preview_lang = request.POST.get(LANGUAGE_INPUT_FIELD, '')
if not preview_lang.strip():
message = _('Language code not provided')
else:
# Set the session key to the requested preview lang
request.session[LANGUAGE_SESSION_KEY] = preview_lang
# Make sure that we set the requested preview lang as the dark lang preference for the
# user, so that the lang_pref middleware doesn't clobber away the dark lang preview.
auth_user = request.user
if auth_user:
set_user_preference(request.user, DARK_LANGUAGE_KEY, preview_lang)
message = _('Language set to language code: {preview_language_code}').format(
preview_language_code=preview_lang
)
show_refresh_message = True
context.update({'form_submit_message': message})
context.update({'success': show_refresh_message})
response = render_to_response(self.template_name, context)
return response
def _clear_preview_language(self, request, context):
"""
Clears the dark language preview
Arguments:
request (Request): The incoming Django Request
context dict: The basic context for the Response
Returns:
HttpResponse: View containing the form for setting the preview lang with the status
included in the context
"""
# delete the session language key (if one is set)
if LANGUAGE_SESSION_KEY in request.session:
del request.session[LANGUAGE_SESSION_KEY]
user_pref = ''
auth_user = request.user
if auth_user:
# Reset user's dark lang preference to null
delete_user_preference(auth_user, DARK_LANGUAGE_KEY)
# Get & set user's preferred language
user_pref = get_user_preference(auth_user, LANGUAGE_KEY)
if user_pref:
request.session[LANGUAGE_SESSION_KEY] = user_pref
if user_pref is None:
message = _('Language reset to the default language code')
else:
message = _("Language reset to user's preference: {preview_language_code}").format(
preview_language_code=user_pref
)
context.update({'form_submit_message': message})
context.update({'success': True})
return render_to_response(self.template_name, context)
|
agpl-3.0
|
LegitInc/legitapi
|
tests/live_tests/live_test.py
|
1
|
3234
|
# Copyright (C) 2013 Rob Boyle / Legit Inc
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses
import random
import string
import threading
import urllib
import oauth2 as oauth
import time
PII_FIELDS = ("name", "address", "ssn", "phone", "email", "facebook_id",
"twitter_id", "linkedin_id")
PII_LEN = 8
def random_string(length):
return ''.join(random.choice(string.ascii_lowercase) for x in range(length))
def generate_user(user_id):
data = {}
data["user_id"] = user_id
for i in range(random.randint(1,len(PII_FIELDS))):
data[random.choice([f for f in PII_FIELDS if f not in data])] = random_string(PII_LEN)
return data
def generate_rep():
data = {}
data["date_joined"] = "2010-02-17"
data["transaction_count"] = random.randint(0, 10)
data["review_count"] = random.randint(0, 10)
data["positive_review_percentage"] = random.randint(0, 100)
return data
def add_users(key, secret, start_at, count, errors, success):
url = "https://APP_URL/api/submit/user"
consumer = oauth.Consumer(key=key, secret=secret)
client = oauth.Client(consumer)
for i in range(start_at, start_at+count):
user_id = i+1
user_data = generate_user(user_id)
rep_data = generate_rep()
user_data.update(rep_data)
resp, content = client.request(url, "POST", urllib.urlencode(user_data))
if resp.status != 200:
errors.append(content)
else:
success.append(content)
THREAD_COUNT = 8
TOTAL_USERS = 64
def live_test(consumer_key, consumer_secret):
start_time = time.time()
per_thread = TOTAL_USERS / THREAD_COUNT
threads = []
errors = []
success = []
for i in range(THREAD_COUNT):
start_at = i * per_thread
t = threading.Thread(target=add_users,
args=(consumer_key, consumer_secret,
start_at, per_thread, errors, success))
threads.append(t)
t.start()
for t in threads:
t.join()
end_time = time.time()
print "ELAPSED TIME: %.2f seconds" % (end_time - start_time)
print "USERS ADDED: %d" % len(success)
print "ERRORS: %s" % len(errors)
print "*** ERROR CONTENT ***"
for e in errors:
print e
if __name__ == "__main__":
live_test("QXYQQNfX3jn49ecTyfI18ueQLZnOfRAZ",
"sebw4Nxhp7F3EuYzlpal1o0IjQkcNm9u")
|
gpl-3.0
|
ChrisAntaki/phantomjs
|
src/qt/qtwebkit/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py
|
113
|
18377
|
# Copyright (C) 2010 Google Inc. All rights reserved.
# Copyright (C) 2010 Gabor Rapcsanyi ([email protected]), University of Szeged
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
The Manager runs a series of tests (TestType interface) against a set
of test files. If a test file fails a TestType, it returns a list of TestFailure
objects to the Manager. The Manager then aggregates the TestFailures to
create a final report.
"""
import json
import logging
import random
import sys
import time
from webkitpy.layout_tests.controllers.layout_test_finder import LayoutTestFinder
from webkitpy.layout_tests.controllers.layout_test_runner import LayoutTestRunner
from webkitpy.layout_tests.controllers.test_result_writer import TestResultWriter
from webkitpy.layout_tests.layout_package import json_layout_results_generator
from webkitpy.layout_tests.layout_package import json_results_generator
from webkitpy.layout_tests.models import test_expectations
from webkitpy.layout_tests.models import test_failures
from webkitpy.layout_tests.models import test_run_results
from webkitpy.layout_tests.models.test_input import TestInput
_log = logging.getLogger(__name__)
# Builder base URL where we have the archived test results.
BUILDER_BASE_URL = "http://build.chromium.org/buildbot/layout_test_results/"
TestExpectations = test_expectations.TestExpectations
class Manager(object):
"""A class for managing running a series of tests on a series of layout
test files."""
def __init__(self, port, options, printer):
"""Initialize test runner data structures.
Args:
port: an object implementing port-specific functionality
options: a dictionary of command line options
printer: a Printer object to record updates to.
"""
self._port = port
self._filesystem = port.host.filesystem
self._options = options
self._printer = printer
self._expectations = None
self.HTTP_SUBDIR = 'http' + port.TEST_PATH_SEPARATOR
self.PERF_SUBDIR = 'perf'
self.WEBSOCKET_SUBDIR = 'websocket' + port.TEST_PATH_SEPARATOR
self.LAYOUT_TESTS_DIRECTORY = 'LayoutTests'
# disable wss server. need to install pyOpenSSL on buildbots.
# self._websocket_secure_server = websocket_server.PyWebSocket(
# options.results_directory, use_tls=True, port=9323)
self._results_directory = self._port.results_directory()
self._finder = LayoutTestFinder(self._port, self._options)
self._runner = LayoutTestRunner(self._options, self._port, self._printer, self._results_directory, self._test_is_slow)
def _collect_tests(self, args):
return self._finder.find_tests(self._options, args)
def _is_http_test(self, test):
return self.HTTP_SUBDIR in test or self._is_websocket_test(test)
def _is_websocket_test(self, test):
return self.WEBSOCKET_SUBDIR in test
def _http_tests(self, test_names):
return set(test for test in test_names if self._is_http_test(test))
def _is_perf_test(self, test):
return self.PERF_SUBDIR == test or (self.PERF_SUBDIR + self._port.TEST_PATH_SEPARATOR) in test
def _prepare_lists(self, paths, test_names):
tests_to_skip = self._finder.skip_tests(paths, test_names, self._expectations, self._http_tests(test_names))
tests_to_run = [test for test in test_names if test not in tests_to_skip]
# Create a sorted list of test files so the subset chunk,
# if used, contains alphabetically consecutive tests.
if self._options.order == 'natural':
tests_to_run.sort(key=self._port.test_key)
elif self._options.order == 'random':
random.shuffle(tests_to_run)
tests_to_run, tests_in_other_chunks = self._finder.split_into_chunks(tests_to_run)
self._expectations.add_skipped_tests(tests_in_other_chunks)
tests_to_skip.update(tests_in_other_chunks)
return tests_to_run, tests_to_skip
def _test_input_for_file(self, test_file):
return TestInput(test_file,
self._options.slow_time_out_ms if self._test_is_slow(test_file) else self._options.time_out_ms,
self._test_requires_lock(test_file))
def _test_requires_lock(self, test_file):
"""Return True if the test needs to be locked when
running multiple copies of NRWTs. Perf tests are locked
because heavy load caused by running other tests in parallel
might cause some of them to timeout."""
return self._is_http_test(test_file) or self._is_perf_test(test_file)
def _test_is_slow(self, test_file):
return self._expectations.has_modifier(test_file, test_expectations.SLOW)
def needs_servers(self, test_names):
return any(self._test_requires_lock(test_name) for test_name in test_names) and self._options.http
def _set_up_run(self, test_names):
self._printer.write_update("Checking build ...")
if not self._port.check_build(self.needs_servers(test_names)):
_log.error("Build check failed")
return False
# This must be started before we check the system dependencies,
# since the helper may do things to make the setup correct.
if self._options.pixel_tests:
self._printer.write_update("Starting pixel test helper ...")
self._port.start_helper()
# Check that the system dependencies (themes, fonts, ...) are correct.
if not self._options.nocheck_sys_deps:
self._printer.write_update("Checking system dependencies ...")
if not self._port.check_sys_deps(self.needs_servers(test_names)):
self._port.stop_helper()
return False
if self._options.clobber_old_results:
self._clobber_old_results()
# Create the output directory if it doesn't already exist.
self._port.host.filesystem.maybe_make_directory(self._results_directory)
self._port.setup_test_run()
return True
def run(self, args):
"""Run the tests and return a RunDetails object with the results."""
self._printer.write_update("Collecting tests ...")
try:
paths, test_names = self._collect_tests(args)
except IOError:
# This is raised if --test-list doesn't exist
return test_run_results.RunDetails(exit_code=-1)
self._printer.write_update("Parsing expectations ...")
self._expectations = test_expectations.TestExpectations(self._port, test_names)
tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)
self._printer.print_found(len(test_names), len(tests_to_run), self._options.repeat_each, self._options.iterations)
# Check to make sure we're not skipping every test.
if not tests_to_run:
_log.critical('No tests to run.')
return test_run_results.RunDetails(exit_code=-1)
if not self._set_up_run(tests_to_run):
return test_run_results.RunDetails(exit_code=-1)
start_time = time.time()
enabled_pixel_tests_in_retry = False
try:
initial_results = self._run_tests(tests_to_run, tests_to_skip, self._options.repeat_each, self._options.iterations,
int(self._options.child_processes), retrying=False)
tests_to_retry = self._tests_to_retry(initial_results, include_crashes=self._port.should_retry_crashes())
if self._options.retry_failures and tests_to_retry and not initial_results.interrupted:
enabled_pixel_tests_in_retry = self._force_pixel_tests_if_needed()
_log.info('')
_log.info("Retrying %d unexpected failure(s) ..." % len(tests_to_retry))
_log.info('')
retry_results = self._run_tests(tests_to_retry, tests_to_skip=set(), repeat_each=1, iterations=1,
num_workers=1, retrying=True)
if enabled_pixel_tests_in_retry:
self._options.pixel_tests = False
else:
retry_results = None
finally:
self._clean_up_run()
end_time = time.time()
# Some crash logs can take a long time to be written out so look
# for new logs after the test run finishes.
_log.debug("looking for new crash logs")
self._look_for_new_crash_logs(initial_results, start_time)
if retry_results:
self._look_for_new_crash_logs(retry_results, start_time)
_log.debug("summarizing results")
summarized_results = test_run_results.summarize_results(self._port, self._expectations, initial_results, retry_results, enabled_pixel_tests_in_retry)
self._printer.print_results(end_time - start_time, initial_results, summarized_results)
if not self._options.dry_run:
self._port.print_leaks_summary()
self._upload_json_files(summarized_results, initial_results)
results_path = self._filesystem.join(self._results_directory, "results.html")
self._copy_results_html_file(results_path)
if self._options.show_results and (initial_results.unexpected_results_by_name or
(self._options.full_results_html and initial_results.total_failures)):
self._port.show_results_html_file(results_path)
return test_run_results.RunDetails(self._port.exit_code_from_summarized_results(summarized_results),
summarized_results, initial_results, retry_results, enabled_pixel_tests_in_retry)
def _run_tests(self, tests_to_run, tests_to_skip, repeat_each, iterations, num_workers, retrying):
needs_http = any(self._is_http_test(test) for test in tests_to_run)
needs_websockets = any(self._is_websocket_test(test) for test in tests_to_run)
test_inputs = []
for _ in xrange(iterations):
for test in tests_to_run:
for _ in xrange(repeat_each):
test_inputs.append(self._test_input_for_file(test))
return self._runner.run_tests(self._expectations, test_inputs, tests_to_skip, num_workers, needs_http, needs_websockets, retrying)
def _clean_up_run(self):
_log.debug("Flushing stdout")
sys.stdout.flush()
_log.debug("Flushing stderr")
sys.stderr.flush()
_log.debug("Stopping helper")
self._port.stop_helper()
_log.debug("Cleaning up port")
self._port.clean_up_test_run()
def _force_pixel_tests_if_needed(self):
if self._options.pixel_tests:
return False
_log.debug("Restarting helper")
self._port.stop_helper()
self._options.pixel_tests = True
self._port.start_helper()
return True
def _look_for_new_crash_logs(self, run_results, start_time):
"""Since crash logs can take a long time to be written out if the system is
under stress, do a second pass at the end of the test run.
run_results: the results of the test run
start_time: time the tests started at. We're looking for crash
logs after that time.
"""
crashed_processes = []
for test, result in run_results.unexpected_results_by_name.iteritems():
if (result.type != test_expectations.CRASH):
continue
for failure in result.failures:
if not isinstance(failure, test_failures.FailureCrash):
continue
crashed_processes.append([test, failure.process_name, failure.pid])
sample_files = self._port.look_for_new_samples(crashed_processes, start_time)
if sample_files:
for test, sample_file in sample_files.iteritems():
writer = TestResultWriter(self._port._filesystem, self._port, self._port.results_directory(), test)
writer.copy_sample_file(sample_file)
crash_logs = self._port.look_for_new_crash_logs(crashed_processes, start_time)
if crash_logs:
for test, crash_log in crash_logs.iteritems():
writer = TestResultWriter(self._port._filesystem, self._port, self._port.results_directory(), test)
writer.write_crash_log(crash_log)
def _clobber_old_results(self):
# Just clobber the actual test results directories since the other
# files in the results directory are explicitly used for cross-run
# tracking.
self._printer.write_update("Clobbering old results in %s" %
self._results_directory)
layout_tests_dir = self._port.layout_tests_dir()
possible_dirs = self._port.test_dirs()
for dirname in possible_dirs:
if self._filesystem.isdir(self._filesystem.join(layout_tests_dir, dirname)):
self._filesystem.rmtree(self._filesystem.join(self._results_directory, dirname))
def _tests_to_retry(self, run_results, include_crashes):
return [result.test_name for result in run_results.unexpected_results_by_name.values() if
((result.type != test_expectations.PASS) and
(result.type != test_expectations.MISSING) and
(result.type != test_expectations.CRASH or include_crashes))]
def _upload_json_files(self, summarized_results, initial_results):
"""Writes the results of the test run as JSON files into the results
dir and upload the files to the appengine server.
Args:
summarized_results: dict of results
initial_results: full summary object
"""
_log.debug("Writing JSON files in %s." % self._results_directory)
# FIXME: Upload stats.json to the server and delete times_ms.
times_trie = json_results_generator.test_timings_trie(self._port, initial_results.results_by_name.values())
times_json_path = self._filesystem.join(self._results_directory, "times_ms.json")
json_results_generator.write_json(self._filesystem, times_trie, times_json_path)
stats_trie = self._stats_trie(initial_results)
stats_path = self._filesystem.join(self._results_directory, "stats.json")
self._filesystem.write_text_file(stats_path, json.dumps(stats_trie))
full_results_path = self._filesystem.join(self._results_directory, "full_results.json")
# We write full_results.json out as jsonp because we need to load it from a file url and Chromium doesn't allow that.
json_results_generator.write_json(self._filesystem, summarized_results, full_results_path, callback="ADD_RESULTS")
generator = json_layout_results_generator.JSONLayoutResultsGenerator(
self._port, self._options.builder_name, self._options.build_name,
self._options.build_number, self._results_directory,
BUILDER_BASE_URL,
self._expectations, initial_results,
self._options.test_results_server,
"layout-tests",
self._options.master_name)
_log.debug("Finished writing JSON files.")
json_files = ["incremental_results.json", "full_results.json", "times_ms.json"]
generator.upload_json_files(json_files)
incremental_results_path = self._filesystem.join(self._results_directory, "incremental_results.json")
# Remove these files from the results directory so they don't take up too much space on the buildbot.
# The tools use the version we uploaded to the results server anyway.
self._filesystem.remove(times_json_path)
self._filesystem.remove(incremental_results_path)
def _copy_results_html_file(self, destination_path):
base_dir = self._port.path_from_webkit_base('LayoutTests', 'fast', 'harness')
results_file = self._filesystem.join(base_dir, 'results.html')
# Note that the results.html template file won't exist when we're using a MockFileSystem during unit tests,
# so make sure it exists before we try to copy it.
if self._filesystem.exists(results_file):
self._filesystem.copyfile(results_file, destination_path)
def _stats_trie(self, initial_results):
def _worker_number(worker_name):
return int(worker_name.split('/')[1]) if worker_name else -1
stats = {}
for result in initial_results.results_by_name.values():
if result.type != test_expectations.SKIP:
stats[result.test_name] = {'results': (_worker_number(result.worker_name), result.test_number, result.pid, int(result.test_run_time * 1000), int(result.total_run_time * 1000))}
stats_trie = {}
for name, value in stats.iteritems():
json_results_generator.add_path_to_trie(name, value, stats_trie)
return stats_trie
|
bsd-3-clause
|
ds-hwang/chromium-crosswalk
|
tools/valgrind/drmemory/PRESUBMIT.py
|
61
|
1175
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into depot_tools.
"""
def CheckChange(input_api, output_api):
"""Checks the DrMemory suppression files for bad suppressions."""
# TODO(timurrrr): find out how to do relative imports
# and remove this ugly hack. Also, the CheckChange function won't be needed.
tools_vg_path = input_api.os_path.join(input_api.PresubmitLocalPath(), '..')
import sys
old_path = sys.path
try:
sys.path = sys.path + [tools_vg_path]
import suppressions
return suppressions.PresubmitCheck(input_api, output_api)
finally:
sys.path = old_path
def CheckChangeOnUpload(input_api, output_api):
return CheckChange(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
return CheckChange(input_api, output_api)
def GetPreferredTryMasters(project, change):
return {
'tryserver.chromium.win': {
'win_drmemory': set(['defaulttests']),
}
}
|
bsd-3-clause
|
abdulbaqi/quranf
|
venv/lib/python2.7/site-packages/pip/_vendor/requests/utils.py
|
222
|
19653
|
# -*- coding: utf-8 -*-
"""
requests.utils
~~~~~~~~~~~~~~
This module provides utility functions that are used within Requests
that are also useful for external consumption.
"""
import cgi
import codecs
import collections
import io
import os
import platform
import re
import sys
import socket
import struct
from . import __version__
from . import certs
from .compat import parse_http_list as _parse_list_header
from .compat import (quote, urlparse, bytes, str, OrderedDict, unquote, is_py2,
builtin_str, getproxies, proxy_bypass)
from .cookies import RequestsCookieJar, cookiejar_from_dict
from .structures import CaseInsensitiveDict
from .exceptions import MissingSchema, InvalidURL
_hush_pyflakes = (RequestsCookieJar,)
NETRC_FILES = ('.netrc', '_netrc')
DEFAULT_CA_BUNDLE_PATH = certs.where()
def dict_to_sequence(d):
"""Returns an internal sequence dictionary update."""
if hasattr(d, 'items'):
d = d.items()
return d
def super_len(o):
if hasattr(o, '__len__'):
return len(o)
if hasattr(o, 'len'):
return o.len
if hasattr(o, 'fileno'):
try:
fileno = o.fileno()
except io.UnsupportedOperation:
pass
else:
return os.fstat(fileno).st_size
if hasattr(o, 'getvalue'):
# e.g. BytesIO, cStringIO.StringI
return len(o.getvalue())
def get_netrc_auth(url):
"""Returns the Requests tuple auth for a given url from netrc."""
try:
from netrc import netrc, NetrcParseError
netrc_path = None
for f in NETRC_FILES:
try:
loc = os.path.expanduser('~/{0}'.format(f))
except KeyError:
# os.path.expanduser can fail when $HOME is undefined and
# getpwuid fails. See http://bugs.python.org/issue20164 &
# https://github.com/kennethreitz/requests/issues/1846
return
if os.path.exists(loc):
netrc_path = loc
break
# Abort early if there isn't one.
if netrc_path is None:
return
ri = urlparse(url)
# Strip port numbers from netloc
host = ri.netloc.split(':')[0]
try:
_netrc = netrc(netrc_path).authenticators(host)
if _netrc:
# Return with login / password
login_i = (0 if _netrc[0] else 1)
return (_netrc[login_i], _netrc[2])
except (NetrcParseError, IOError):
# If there was a parsing error or a permissions issue reading the file,
# we'll just skip netrc auth
pass
# AppEngine hackiness.
except (ImportError, AttributeError):
pass
def guess_filename(obj):
"""Tries to guess the filename of the given object."""
name = getattr(obj, 'name', None)
if name and name[0] != '<' and name[-1] != '>':
return os.path.basename(name)
def from_key_val_list(value):
"""Take an object and test to see if it can be represented as a
    dictionary. If it can be, return an
OrderedDict, e.g.,
::
>>> from_key_val_list([('key', 'val')])
OrderedDict([('key', 'val')])
>>> from_key_val_list('string')
ValueError: need more than 1 value to unpack
>>> from_key_val_list({'key': 'val'})
OrderedDict([('key', 'val')])
"""
if value is None:
return None
if isinstance(value, (str, bytes, bool, int)):
raise ValueError('cannot encode objects that are not 2-tuples')
return OrderedDict(value)
def to_key_val_list(value):
"""Take an object and test to see if it can be represented as a
dictionary. If it can be, return a list of tuples, e.g.,
::
>>> to_key_val_list([('key', 'val')])
[('key', 'val')]
>>> to_key_val_list({'key': 'val'})
[('key', 'val')]
>>> to_key_val_list('string')
ValueError: cannot encode objects that are not 2-tuples.
"""
if value is None:
return None
if isinstance(value, (str, bytes, bool, int)):
raise ValueError('cannot encode objects that are not 2-tuples')
if isinstance(value, collections.Mapping):
value = value.items()
return list(value)
# From mitsuhiko/werkzeug (used with permission).
def parse_list_header(value):
"""Parse lists as described by RFC 2068 Section 2.
In particular, parse comma-separated lists where the elements of
the list may include quoted-strings. A quoted-string could
contain a comma. A non-quoted string could have quotes in the
middle. Quotes are removed automatically after parsing.
It basically works like :func:`parse_set_header` just that items
may appear multiple times and case sensitivity is preserved.
The return value is a standard :class:`list`:
>>> parse_list_header('token, "quoted value"')
['token', 'quoted value']
To create a header from the :class:`list` again, use the
:func:`dump_header` function.
:param value: a string with a list header.
:return: :class:`list`
"""
result = []
for item in _parse_list_header(value):
if item[:1] == item[-1:] == '"':
item = unquote_header_value(item[1:-1])
result.append(item)
return result
# From mitsuhiko/werkzeug (used with permission).
def parse_dict_header(value):
"""Parse lists of key, value pairs as described by RFC 2068 Section 2 and
convert them into a python dict:
>>> d = parse_dict_header('foo="is a fish", bar="as well"')
>>> type(d) is dict
True
>>> sorted(d.items())
[('bar', 'as well'), ('foo', 'is a fish')]
If there is no value for a key it will be `None`:
>>> parse_dict_header('key_without_value')
{'key_without_value': None}
To create a header from the :class:`dict` again, use the
:func:`dump_header` function.
:param value: a string with a dict header.
:return: :class:`dict`
"""
result = {}
for item in _parse_list_header(value):
if '=' not in item:
result[item] = None
continue
name, value = item.split('=', 1)
if value[:1] == value[-1:] == '"':
value = unquote_header_value(value[1:-1])
result[name] = value
return result
# From mitsuhiko/werkzeug (used with permission).
def unquote_header_value(value, is_filename=False):
r"""Unquotes a header value. (Reversal of :func:`quote_header_value`).
This does not use the real unquoting but what browsers are actually
using for quoting.
:param value: the header value to unquote.
"""
if value and value[0] == value[-1] == '"':
# this is not the real unquoting, but fixing this so that the
# RFC is met will result in bugs with internet explorer and
# probably some other browsers as well. IE for example is
# uploading files with "C:\foo\bar.txt" as filename
value = value[1:-1]
# if this is a filename and the starting characters look like
# a UNC path, then just return the value without quotes. Using the
# replace sequence below on a UNC path has the effect of turning
# the leading double slash into a single slash and then
# _fix_ie_filename() doesn't work correctly. See #458.
if not is_filename or value[:2] != '\\\\':
return value.replace('\\\\', '\\').replace('\\"', '"')
return value
def dict_from_cookiejar(cj):
"""Returns a key/value dictionary from a CookieJar.
:param cj: CookieJar object to extract cookies from.
"""
cookie_dict = {}
for cookie in cj:
cookie_dict[cookie.name] = cookie.value
return cookie_dict
def add_dict_to_cookiejar(cj, cookie_dict):
"""Returns a CookieJar from a key/value dictionary.
:param cj: CookieJar to insert cookies into.
:param cookie_dict: Dict of key/values to insert into CookieJar.
"""
cj2 = cookiejar_from_dict(cookie_dict)
cj.update(cj2)
return cj
def get_encodings_from_content(content):
"""Returns encodings from given content string.
:param content: bytestring to extract encodings from.
"""
charset_re = re.compile(r'<meta.*?charset=["\']*(.+?)["\'>]', flags=re.I)
pragma_re = re.compile(r'<meta.*?content=["\']*;?charset=(.+?)["\'>]', flags=re.I)
xml_re = re.compile(r'^<\?xml.*?encoding=["\']*(.+?)["\'>]')
return (charset_re.findall(content) +
pragma_re.findall(content) +
xml_re.findall(content))
def get_encoding_from_headers(headers):
"""Returns encodings from given HTTP Header Dict.
:param headers: dictionary to extract encoding from.
"""
content_type = headers.get('content-type')
if not content_type:
return None
content_type, params = cgi.parse_header(content_type)
if 'charset' in params:
return params['charset'].strip("'\"")
if 'text' in content_type:
return 'ISO-8859-1'
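# Illustrative sketch (assumed header values, not part of the upstream module):
#   get_encoding_from_headers({'content-type': 'text/html; charset=UTF-8'})  -> 'UTF-8'
#   get_encoding_from_headers({'content-type': 'text/plain'})                -> 'ISO-8859-1'
#   get_encoding_from_headers({'content-type': 'application/json'})          -> None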
def stream_decode_response_unicode(iterator, r):
"""Stream decodes a iterator."""
if r.encoding is None:
for item in iterator:
yield item
return
decoder = codecs.getincrementaldecoder(r.encoding)(errors='replace')
for chunk in iterator:
rv = decoder.decode(chunk)
if rv:
yield rv
rv = decoder.decode(b'', final=True)
if rv:
yield rv
def iter_slices(string, slice_length):
"""Iterate over slices of a string."""
pos = 0
while pos < len(string):
yield string[pos:pos + slice_length]
pos += slice_length
def get_unicode_from_response(r):
"""Returns the requested content back in unicode.
:param r: Response object to get unicode content from.
Tried:
1. charset from content-type
    2. every encoding from ``<meta ... charset=XXX>``
3. fall back and replace all unicode characters
"""
tried_encodings = []
# Try charset from content-type
encoding = get_encoding_from_headers(r.headers)
if encoding:
try:
return str(r.content, encoding)
except UnicodeError:
tried_encodings.append(encoding)
# Fall back:
try:
return str(r.content, encoding, errors='replace')
except TypeError:
return r.content
# The unreserved URI characters (RFC 3986)
UNRESERVED_SET = frozenset(
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
+ "0123456789-._~")
def unquote_unreserved(uri):
"""Un-escape any percent-escape sequences in a URI that are unreserved
characters. This leaves all reserved, illegal and non-ASCII bytes encoded.
"""
parts = uri.split('%')
for i in range(1, len(parts)):
h = parts[i][0:2]
if len(h) == 2 and h.isalnum():
try:
c = chr(int(h, 16))
except ValueError:
raise InvalidURL("Invalid percent-escape sequence: '%s'" % h)
if c in UNRESERVED_SET:
parts[i] = c + parts[i][2:]
else:
parts[i] = '%' + parts[i]
else:
parts[i] = '%' + parts[i]
return ''.join(parts)
def requote_uri(uri):
"""Re-quote the given URI.
This function passes the given URI through an unquote/quote cycle to
ensure that it is fully and consistently quoted.
"""
# Unquote only the unreserved characters
# Then quote only illegal characters (do not quote reserved, unreserved,
# or '%')
return quote(unquote_unreserved(uri), safe="!#$%&'()*+,/:;=?@[]~")
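# Illustrative sketch (assumed URI, not part of the upstream module): an
# unnecessarily escaped unreserved character is unescaped, while an illegal raw
# space gets percent-encoded.
#   requote_uri('http://example.com/a%7Eb c')  -> 'http://example.com/a~b%20c'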
def address_in_network(ip, net):
"""
    This function allows you to check if an IP belongs to a network subnet
Example: returns True if ip = 192.168.1.1 and net = 192.168.1.0/24
returns False if ip = 192.168.1.1 and net = 192.168.100.0/24
"""
ipaddr = struct.unpack('=L', socket.inet_aton(ip))[0]
netaddr, bits = net.split('/')
netmask = struct.unpack('=L', socket.inet_aton(dotted_netmask(int(bits))))[0]
network = struct.unpack('=L', socket.inet_aton(netaddr))[0] & netmask
return (ipaddr & netmask) == (network & netmask)
def dotted_netmask(mask):
"""
Converts mask from /xx format to xxx.xxx.xxx.xxx
Example: if mask is 24 function returns 255.255.255.0
"""
bits = 0xffffffff ^ (1 << 32 - mask) - 1
return socket.inet_ntoa(struct.pack('>I', bits))
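# Illustrative sketch (assumed addresses, matching the docstring examples above):
#   dotted_netmask(24)                                     -> '255.255.255.0'
#   address_in_network('192.168.1.1', '192.168.1.0/24')    -> True
#   address_in_network('192.168.1.1', '192.168.100.0/24')  -> False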
def is_ipv4_address(string_ip):
try:
socket.inet_aton(string_ip)
except socket.error:
return False
return True
def is_valid_cidr(string_network):
"""Very simple check of the cidr format in no_proxy variable"""
if string_network.count('/') == 1:
try:
mask = int(string_network.split('/')[1])
except ValueError:
return False
if mask < 1 or mask > 32:
return False
try:
socket.inet_aton(string_network.split('/')[0])
except socket.error:
return False
else:
return False
return True
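# Illustrative sketch (assumed values, not part of the upstream module):
#   is_valid_cidr('192.168.1.0/24')  -> True
#   is_valid_cidr('192.168.1.0/33')  -> False  (mask outside 1..32)
#   is_valid_cidr('192.168.1.0')     -> False  (no mask at all)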
def get_environ_proxies(url):
"""Return a dict of environment proxies."""
get_proxy = lambda k: os.environ.get(k) or os.environ.get(k.upper())
# First check whether no_proxy is defined. If it is, check that the URL
# we're getting isn't in the no_proxy list.
no_proxy = get_proxy('no_proxy')
netloc = urlparse(url).netloc
if no_proxy:
# We need to check whether we match here. We need to see if we match
# the end of the netloc, both with and without the port.
no_proxy = no_proxy.replace(' ', '').split(',')
ip = netloc.split(':')[0]
if is_ipv4_address(ip):
for proxy_ip in no_proxy:
if is_valid_cidr(proxy_ip):
if address_in_network(ip, proxy_ip):
return {}
else:
for host in no_proxy:
if netloc.endswith(host) or netloc.split(':')[0].endswith(host):
# The URL does match something in no_proxy, so we don't want
# to apply the proxies on this URL.
return {}
# If the system proxy settings indicate that this URL should be bypassed,
# don't proxy.
# The proxy_bypass function is incredibly buggy on OS X in early versions
# of Python 2.6, so allow this call to fail. Only catch the specific
# exceptions we've seen, though: this call failing in other ways can reveal
# legitimate problems.
try:
bypass = proxy_bypass(netloc)
except (TypeError, socket.gaierror):
bypass = False
if bypass:
return {}
# If we get here, we either didn't have no_proxy set or we're not going
# anywhere that no_proxy applies to, and the system settings don't require
# bypassing the proxy for the current URL.
return getproxies()
def default_user_agent(name="python-requests"):
"""Return a string representing the default user agent."""
_implementation = platform.python_implementation()
if _implementation == 'CPython':
_implementation_version = platform.python_version()
elif _implementation == 'PyPy':
_implementation_version = '%s.%s.%s' % (sys.pypy_version_info.major,
sys.pypy_version_info.minor,
sys.pypy_version_info.micro)
if sys.pypy_version_info.releaselevel != 'final':
_implementation_version = ''.join([_implementation_version, sys.pypy_version_info.releaselevel])
elif _implementation == 'Jython':
_implementation_version = platform.python_version() # Complete Guess
elif _implementation == 'IronPython':
_implementation_version = platform.python_version() # Complete Guess
else:
_implementation_version = 'Unknown'
try:
p_system = platform.system()
p_release = platform.release()
except IOError:
p_system = 'Unknown'
p_release = 'Unknown'
return " ".join(['%s/%s' % (name, __version__),
'%s/%s' % (_implementation, _implementation_version),
'%s/%s' % (p_system, p_release)])
def default_headers():
return CaseInsensitiveDict({
'User-Agent': default_user_agent(),
'Accept-Encoding': ', '.join(('gzip', 'deflate', 'compress')),
'Accept': '*/*'
})
def parse_header_links(value):
"""Return a dict of parsed link headers proxies.
i.e. Link: <http:/.../front.jpeg>; rel=front; type="image/jpeg",<http://.../back.jpeg>; rel=back;type="image/jpeg"
"""
links = []
replace_chars = " '\""
for val in value.split(","):
try:
url, params = val.split(";", 1)
except ValueError:
url, params = val, ''
link = {}
link["url"] = url.strip("<> '\"")
for param in params.split(";"):
try:
key, value = param.split("=")
except ValueError:
break
link[key.strip(replace_chars)] = value.strip(replace_chars)
links.append(link)
return links
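# Illustrative sketch (assumed header value, not part of the upstream module):
#   parse_header_links('<http://example.com/page2>; rel="next"')
#       -> [{'url': 'http://example.com/page2', 'rel': 'next'}]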
# Null bytes; no need to recreate these on each call to guess_json_utf
_null = '\x00'.encode('ascii') # encoding to ASCII for Python 3
_null2 = _null * 2
_null3 = _null * 3
def guess_json_utf(data):
# JSON always starts with two ASCII characters, so detection is as
# easy as counting the nulls and from their location and count
# determine the encoding. Also detect a BOM, if present.
sample = data[:4]
    if sample in (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE):
return 'utf-32' # BOM included
if sample[:3] == codecs.BOM_UTF8:
return 'utf-8-sig' # BOM included, MS style (discouraged)
if sample[:2] in (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE):
return 'utf-16' # BOM included
nullcount = sample.count(_null)
if nullcount == 0:
return 'utf-8'
if nullcount == 2:
if sample[::2] == _null2: # 1st and 3rd are null
return 'utf-16-be'
if sample[1::2] == _null2: # 2nd and 4th are null
return 'utf-16-le'
# Did not detect 2 valid UTF-16 ascii-range characters
if nullcount == 3:
if sample[:3] == _null3:
return 'utf-32-be'
if sample[1:] == _null3:
return 'utf-32-le'
# Did not detect a valid UTF-32 ascii-range character
return None
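# Illustrative sketch (assumed payloads, not part of the upstream module):
#   guess_json_utf('{}'.encode('utf-8'))      -> 'utf-8'      (no null bytes)
#   guess_json_utf('{}'.encode('utf-16-le'))  -> 'utf-16-le'  (2nd and 4th bytes are null)
#   guess_json_utf('{}'.encode('utf-32-be'))  -> 'utf-32-be'  (first three bytes are null)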
def except_on_missing_scheme(url):
"""Given a URL, raise a MissingSchema exception if the scheme is missing.
"""
scheme, netloc, path, params, query, fragment = urlparse(url)
if not scheme:
raise MissingSchema('Proxy URLs must have explicit schemes.')
def get_auth_from_url(url):
"""Given a url with authentication components, extract them into a tuple of
username,password."""
parsed = urlparse(url)
try:
auth = (unquote(parsed.username), unquote(parsed.password))
except (AttributeError, TypeError):
auth = ('', '')
return auth
def to_native_string(string, encoding='ascii'):
"""
Given a string object, regardless of type, returns a representation of that
string in the native string type, encoding and decoding where necessary.
This assumes ASCII unless told otherwise.
"""
out = None
if isinstance(string, builtin_str):
out = string
else:
if is_py2:
out = string.encode(encoding)
else:
out = string.decode(encoding)
return out
|
mit
|
ChrisLR/Python-Roguelike-Template
|
scenes/character_creation/scene.py
|
1
|
9203
|
from bearlibterminal import terminal
from clubsandwich.ui import (
UIScene,
SingleLineTextInputView,
LabelView,
CyclingButtonView,
ButtonView,
LayoutOptions,
WindowView,
)
from components.needs import Needs
from data.python_templates.classes import character_class_templates
from data.python_templates.needs import hunger, thirst
from data.python_templates.outfits import starter_warrior, starter_thief, starter_ranger
import races
from scenes.game.scene import GameScene
from ui.controls.validatedintstepperview import ValidatedIntStepperView
from scenes.character_creation.choicesresolution import ChoicesResolutionWindow
from components.stats import CharacterStats
from util.abilityscoreset import AbilityScoreSet
class CharacterCreationScene(UIScene):
ID = "CharacterCreation"
# TODO Remake this properly using the new style controls interface.
def __init__(self, game_context):
self.covers_screen = True
self.game_context = game_context
self.sorted_classes = sorted(character_class_templates.values(), key=lambda c_class: c_class.name)
sorted_classes_names = [character_class.name for character_class in self.sorted_classes]
self.sorted_races = races.listing
sorted_races_names = [race.name for race in self.sorted_races]
views = [
WindowView(title='Character Creation', subviews=[
LabelView("Name:", layout_options=LayoutOptions(**get_left_layout(2))),
SingleLineTextInputView(
callback=self.set_name,
layout_options=LayoutOptions(**get_right_layout(3, width=0.2, right=0.4))
),
LabelView("Class:", layout_options=LayoutOptions(**get_left_layout(3))),
CyclingButtonView(
options=sorted_classes_names,
initial_value=sorted_classes_names[0],
callback=self.set_character_class,
layout_options=LayoutOptions(**get_right_layout(3))
),
LabelView("Race:", layout_options=LayoutOptions(**get_left_layout(4))),
CyclingButtonView(
options=sorted_races_names,
initial_value=sorted_races_names[0],
callback=self.set_race,
layout_options=LayoutOptions(**get_right_layout(4))
),
LabelView("Strength:", layout_options=LayoutOptions(**get_left_layout(6))),
ValidatedIntStepperView(
validation_callback=self.validate_points,
value=8, callback=lambda value: self.set_stat("Strength", value),
min_value=8, max_value=15,
layout_options=LayoutOptions(**get_right_layout(7, width=5)),
),
LabelView("Dexterity:", layout_options=LayoutOptions(**get_left_layout(7))),
ValidatedIntStepperView(
validation_callback=self.validate_points,
value=8, callback=lambda value: self.set_stat("Dexterity", value),
min_value=8, max_value=15,
layout_options=LayoutOptions(**get_right_layout(8, width=5)),
),
LabelView("Constitution:", layout_options=LayoutOptions(**get_left_layout(8))),
ValidatedIntStepperView(
validation_callback=self.validate_points,
value=8, callback=lambda value: self.set_stat("Constitution", value),
min_value=8, max_value=15,
layout_options=LayoutOptions(**get_right_layout(9, width=5)),
),
LabelView("Intelligence:", layout_options=LayoutOptions(**get_left_layout(9))),
ValidatedIntStepperView(
validation_callback=self.validate_points,
value=8, callback=lambda value: self.set_stat("Intelligence", value),
min_value=8, max_value=15,
layout_options=LayoutOptions(**get_right_layout(10, width=5)),
),
LabelView("Charisma:", layout_options=LayoutOptions(**get_left_layout(10))),
ValidatedIntStepperView(
validation_callback=self.validate_points,
value=8, callback=lambda value: self.set_stat("Charisma", value),
min_value=8, max_value=15,
layout_options=LayoutOptions(**get_right_layout(11, width=5)),
),
LabelView("Wisdom:", layout_options=LayoutOptions(**get_left_layout(11))),
ValidatedIntStepperView(
validation_callback=self.validate_points,
value=8, callback=lambda value: self.set_stat("Wisdom", value),
min_value=8, max_value=15,
layout_options=LayoutOptions(**get_right_layout(12, width=5))
),
ButtonView('Finish', self.check_choices,
layout_options=LayoutOptions(**get_left_layout(13, left=0.45)))
])
]
super().__init__(views)
self.character_factory = self.game_context.character_factory
self.body_factory = self.game_context.body_factory
self.name = ""
self.character_class = self.sorted_classes[0]
self.race = self.sorted_races[0]
self.stats = {
"Strength": 8,
"Dexterity": 8,
"Constitution": 8,
"Intelligence": 8,
"Charisma": 8,
"Wisdom": 8
}
self.points_left = 27
self.choices = {}
def set_name(self, value):
self.name = value
def set_character_class(self, value):
self.character_class = next((character_class for character_class in self.sorted_classes
if character_class.name == value), None)
def set_race(self, value):
self.race = next((race for race in self.sorted_races
if race.name == value), None)
def set_stat(self, name, value):
self.stats[name] = value
def validate_points(self, old_value, new_value):
if new_value > old_value:
point_cost = 1 if old_value < 13 else 2
if self.points_left >= point_cost:
self.points_left -= point_cost
return True
if new_value < old_value:
point_cost = 1 if new_value < 13 else 2
if new_value >= 8:
self.points_left += point_cost
return True
return False
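    # Point-buy sketch (assumed rule set, not stated elsewhere in this file):
    # with 27 points and scores starting at 8, each step up to 13 costs 1 point
    # (5 points for 8 -> 13) and the steps to 14 and 15 cost 2 points each, so
    # raising a single score from 8 to 15 costs 9 points in total.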
def check_choices(self):
if not self.choices:
if hasattr(self.race, 'choices'):
chain = ((key, value) for key, value in self.race.choices.items())
def move_through_chain(choice, choice_name):
if choice and choice_name:
self.choices[choice_name] = choice
choice_name, choices = next(chain, (None, None))
if choice_name:
self.director.push_scene(ChoicesResolutionWindow(move_through_chain, choice_name, choices))
else:
self.finish()
move_through_chain(None, None)
else:
self.finish()
def finish(self):
if self.choices:
race = self.race(**self.choices)
else:
race = self.race()
self.game_context.player = self.character_factory.create(
uid="player",
name=self.name,
class_uid=self.character_class.uid,
race=race,
stats=CharacterStats(AbilityScoreSet(**{uid.lower(): value for uid, value in self.stats.items()})),
body_uid=self.race.body.uid,
enforce_max_hp=True
)
player = self.game_context.player
player.register_component(Needs.create_standard(1, 100, hunger, thirst))
# TODO We will need a much better way to assign outfits.
if self.character_class.uid.lower() == "thief":
starter_thief.apply(player)
elif self.character_class.uid.lower() == "warrior":
starter_warrior.apply(player)
elif self.character_class.uid.lower() == "ranger":
starter_ranger.apply(player)
player.is_player = True
self.director.replace_scene(GameScene(self.game_context))
def terminal_read(self, val):
super().terminal_read(val)
if val == terminal.TK_UP:
self.view.find_prev_responder()
elif val == terminal.TK_DOWN:
self.view.find_next_responder()
def get_left_layout(top, **kwargs):
layout_options = dict(width=0.1, left=0.3, top=top, height=0.1, bottom=None, right=None)
if kwargs:
layout_options.update(kwargs)
return layout_options
def get_right_layout(top, **kwargs):
layout_options = dict(width=0.1, left=None, top=top, height=0.1, bottom=None, right=0.5)
if kwargs:
layout_options.update(kwargs)
return layout_options
|
mit
|
holm/suds
|
suds/umx/core.py
|
199
|
7575
|
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( [email protected] )
"""
Provides base classes for XML->object I{unmarshalling}.
"""
from logging import getLogger
from suds import *
from suds.umx import *
from suds.umx.attrlist import AttrList
from suds.sax.text import Text
from suds.sudsobject import Factory, merge
log = getLogger(__name__)
reserved = { 'class':'cls', 'def':'dfn', }
class Core:
"""
The abstract XML I{node} unmarshaller. This class provides the
I{core} unmarshalling functionality.
"""
def process(self, content):
"""
Process an object graph representation of the xml I{node}.
@param content: The current content being unmarshalled.
@type content: L{Content}
@return: A suds object.
@rtype: L{Object}
"""
self.reset()
return self.append(content)
def append(self, content):
"""
Process the specified node and convert the XML document into
a I{suds} L{object}.
@param content: The current content being unmarshalled.
@type content: L{Content}
@return: A I{append-result} tuple as: (L{Object}, I{value})
@rtype: I{append-result}
@note: This is not the proper entry point.
@see: L{process()}
"""
self.start(content)
self.append_attributes(content)
self.append_children(content)
self.append_text(content)
self.end(content)
return self.postprocess(content)
def postprocess(self, content):
"""
Perform final processing of the resulting data structure as follows:
- Mixed values (children and text) will have a result of the I{content.node}.
        - Semi-simple values (attributes, no-children and text) will have a result of a
property object.
- Simple values (no-attributes, no-children with text nodes) will have a string
result equal to the value of the content.node.getText().
@param content: The current content being unmarshalled.
@type content: L{Content}
@return: The post-processed result.
@rtype: I{any}
"""
node = content.node
if len(node.children) and node.hasText():
return node
attributes = AttrList(node.attributes)
if attributes.rlen() and \
not len(node.children) and \
node.hasText():
p = Factory.property(node.name, node.getText())
return merge(content.data, p)
if len(content.data):
return content.data
lang = attributes.lang()
if content.node.isnil():
return None
if not len(node.children) and content.text is None:
if self.nillable(content):
return None
else:
return Text('', lang=lang)
if isinstance(content.text, basestring):
return Text(content.text, lang=lang)
else:
return content.text
def append_attributes(self, content):
"""
Append attribute nodes into L{Content.data}.
Attributes in the I{schema} or I{xml} namespaces are skipped.
@param content: The current content being unmarshalled.
@type content: L{Content}
"""
attributes = AttrList(content.node.attributes)
for attr in attributes.real():
name = attr.name
value = attr.value
self.append_attribute(name, value, content)
def append_attribute(self, name, value, content):
"""
Append an attribute name/value into L{Content.data}.
@param name: The attribute name
@type name: basestring
@param value: The attribute's value
@type value: basestring
@param content: The current content being unmarshalled.
@type content: L{Content}
"""
key = name
key = '_%s' % reserved.get(key, key)
setattr(content.data, key, value)
def append_children(self, content):
"""
Append child nodes into L{Content.data}
@param content: The current content being unmarshalled.
@type content: L{Content}
"""
for child in content.node:
cont = Content(child)
cval = self.append(cont)
key = reserved.get(child.name, child.name)
if key in content.data:
v = getattr(content.data, key)
if isinstance(v, list):
v.append(cval)
else:
setattr(content.data, key, [v, cval])
continue
if self.unbounded(cont):
if cval is None:
setattr(content.data, key, [])
else:
setattr(content.data, key, [cval,])
else:
setattr(content.data, key, cval)
def append_text(self, content):
"""
Append text nodes into L{Content.data}
@param content: The current content being unmarshalled.
@type content: L{Content}
"""
if content.node.hasText():
content.text = content.node.getText()
def reset(self):
pass
def start(self, content):
"""
Processing on I{node} has started. Build and return
the proper object.
@param content: The current content being unmarshalled.
@type content: L{Content}
@return: A subclass of Object.
@rtype: L{Object}
"""
content.data = Factory.object(content.node.name)
def end(self, content):
"""
Processing on I{node} has ended.
@param content: The current content being unmarshalled.
@type content: L{Content}
"""
pass
def bounded(self, content):
"""
Get whether the content is bounded (not a list).
@param content: The current content being unmarshalled.
@type content: L{Content}
@return: True if bounded, else False
@rtype: boolean
'"""
return ( not self.unbounded(content) )
def unbounded(self, content):
"""
Get whether the object is unbounded (a list).
@param content: The current content being unmarshalled.
@type content: L{Content}
@return: True if unbounded, else False
@rtype: boolean
'"""
return False
def nillable(self, content):
"""
Get whether the object is nillable.
@param content: The current content being unmarshalled.
@type content: L{Content}
@return: True if nillable, else False
@rtype: boolean
'"""
return False
|
lgpl-3.0
|
pair-code/lit
|
lit_nlp/examples/coref/datasets/winogender.py
|
2
|
6390
|
"""Coreference version of the Winogender dataset.
Each instance has two edges, one between the pronoun and the occupation and one
between the pronoun and the participant. The pronoun is always span1.
There are 120 templates in the Winogender set, 60 coreferent with the
occupation, and 60 coreferent with the participant. Each is instantiated
six times: with and without "someone" substituting for the participant,
and with {male, female, neutral} pronouns, for a total of 720 examples.
Winogender repo: https://github.com/rudinger/winogender-schemas
Paper: Gender Bias in Coreference Resolution (Rudinger et al. 2018),
https://arxiv.org/pdf/1804.09301.pdf
"""
import enum
import os
from typing import Optional
from absl import logging
from lit_nlp.api import dataset as lit_dataset
from lit_nlp.api import dtypes as lit_dtypes
from lit_nlp.api import types as lit_types
import pandas as pd
import transformers # for file caching
EdgeLabel = lit_dtypes.EdgeLabel
DATA_ROOT = "https://raw.githubusercontent.com/rudinger/winogender-schemas/master/data/" # pylint: disable=line-too-long
def get_data(name):
"""Download data or return local cache path."""
url = os.path.join(DATA_ROOT, name)
logging.info("Winogender: retrieving data file %s", url)
return transformers.file_utils.cached_path(url)
## From gap-coreference/constants.py
class Gender(enum.Enum):
UNKNOWN = 0
MASCULINE = 1
FEMININE = 2
NOM = "$NOM_PRONOUN"
POSS = "$POSS_PRONOUN"
ACC = "$ACC_PRONOUN"
PRONOUN_MAP = {
Gender.FEMININE: {
NOM: "she",
POSS: "her",
ACC: "her"
},
Gender.MASCULINE: {
NOM: "he",
POSS: "his",
ACC: "him"
},
Gender.UNKNOWN: {
NOM: "they",
POSS: "their",
ACC: "them"
},
}
ANSWER_VOCAB = ["occupation", "participant"]
PRONOUNS_BY_GENDER = {k: "/".join(PRONOUN_MAP[k].values()) for k in PRONOUN_MAP}
# Based on winogender-schemas/scripts/instantiate.py, but adapted to LIT format.
def generate_instance(occupation,
participant,
answer,
sentence,
gender=Gender.UNKNOWN,
someone=False):
"""Generate a Winogender example from a template row."""
toks = sentence.split(" ")
part_index = toks.index("$PARTICIPANT")
if not someone:
# we are using the instantiated participant,
# e.g. "client", "patient", "customer",...
toks[part_index] = participant
else: # we are using the bleached NP "someone" for the other participant
# first, remove the token that precedes $PARTICIPANT, i.e. "the"
toks = toks[:part_index - 1] + toks[part_index:]
# recompute participant index (it should be part_index - 1)
part_index = toks.index("$PARTICIPANT")
toks[part_index] = "Someone" if part_index == 0 else "someone"
# Make sure we do this /after/ substituting "someone",
# since that may change indices.
occ_index = toks.index("$OCCUPATION")
# This should always pass on the regular Winogender dataset.
assert " " not in occupation, "Occupation must be single-token."
toks[occ_index] = occupation
pronoun_idx = None
gendered_toks = []
for i, t in enumerate(toks):
sub = PRONOUN_MAP[gender].get(t, t)
if sub != t:
pronoun_idx = i
gendered_toks.append(sub)
# NOM, POSS, ACC
pronoun_type = toks[pronoun_idx][1:].replace("_PRONOUN", "")
# Process text for fluency
text = " ".join(gendered_toks)
text = text.replace("they was", "they were")
text = text.replace("They was", "They were")
record = {"text": text, "tokens": text.split()}
t0 = EdgeLabel(
span1=(occ_index, occ_index + 1),
span2=(pronoun_idx, pronoun_idx + 1),
label=int(1 if answer == 0 else 0))
t1 = EdgeLabel(
span1=(part_index, part_index + 1),
span2=(pronoun_idx, pronoun_idx + 1),
label=int(1 if answer == 1 else 0))
record["coref"] = [t0, t1]
record.update({
"occupation": occupation,
"participant": participant,
"answer": ANSWER_VOCAB[answer],
"someone": str(someone),
"pronouns": PRONOUNS_BY_GENDER[gender],
"pronoun_type": pronoun_type,
"gender": gender.name,
})
return record
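# Illustrative sketch (hypothetical template row, not taken from templates.tsv):
#   generate_instance("technician", "customer", answer=1,
#                     sentence="The $OCCUPATION told the $PARTICIPANT that "
#                              "$NOM_PRONOUN could pay with cash.",
#                     gender=Gender.FEMININE, someone=False)
# yields the text "The technician told the customer that she could pay with cash."
# plus two coref edges onto the pronoun: the occupation edge labelled 0 and the
# participant edge labelled 1, since answer == 1 marks the participant as the
# true antecedent.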
class WinogenderDataset(lit_dataset.Dataset):
"""Coreference on Winogender schemas (Rudinger et al. 2018)."""
# These should match the args to generate_instance()
TSV_COLUMN_NAMES = ["occupation", "participant", "answer", "sentence"]
def __init__(self,
templates_path: Optional[str] = None,
occupation_stats_path: Optional[str] = None):
templates_path = templates_path or get_data("templates.tsv")
occupation_stats_path = occupation_stats_path or get_data(
"occupations-stats.tsv")
# Load templates and make a DataFrame.
with open(templates_path) as fd:
self.templates_df = pd.read_csv(
fd, sep="\t", header=0, names=self.TSV_COLUMN_NAMES)
        # Load occupation stats.
with open(occupation_stats_path) as fd:
self.occupation_df = pd.read_csv(fd, sep="\t").set_index("occupation")
# Make examples for each {someone} x {gender} x {template}
self._examples = []
for _, row in self.templates_df.iterrows():
for someone in {False, True}:
for gender in Gender:
r = generate_instance(someone=someone, gender=gender, **row)
r["pf_bls"] = (
self.occupation_df.bls_pct_female[r["occupation"]] / 100.0)
self._examples.append(r)
def spec(self):
return {
"text":
lit_types.TextSegment(),
"tokens":
lit_types.Tokens(parent="text"),
"coref":
lit_types.EdgeLabels(align="tokens"),
# Metadata fields for filtering and analysis.
"occupation":
lit_types.CategoryLabel(),
"participant":
lit_types.CategoryLabel(),
"answer":
lit_types.CategoryLabel(vocab=ANSWER_VOCAB),
"someone":
lit_types.CategoryLabel(vocab=["True", "False"]),
"pronouns":
lit_types.CategoryLabel(vocab=list(PRONOUNS_BY_GENDER.values())),
"pronoun_type":
lit_types.CategoryLabel(vocab=["NOM", "POSS", "ACC"]),
"gender":
lit_types.CategoryLabel(vocab=[g.name for g in Gender]),
"pf_bls":
lit_types.Scalar(),
}
|
apache-2.0
|
mapr/sahara
|
sahara/plugins/vanilla/hadoop2/validation.py
|
2
|
5127
|
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sahara.i18n import _
from sahara.plugins.general import exceptions as ex
from sahara.plugins.general import utils as u
from sahara.plugins.vanilla.hadoop2 import config_helper as cu
from sahara.plugins.vanilla import utils as vu
from sahara.utils import general as gu
def validate_cluster_creating(pctx, cluster):
nn_count = _get_inst_count(cluster, 'namenode')
if nn_count != 1:
raise ex.InvalidComponentCountException('namenode', 1, nn_count)
snn_count = _get_inst_count(cluster, 'secondarynamenode')
if snn_count not in [0, 1]:
raise ex.InvalidComponentCountException('secondarynamenode', '0 or 1',
snn_count)
rm_count = _get_inst_count(cluster, 'resourcemanager')
if rm_count not in [0, 1]:
raise ex.InvalidComponentCountException('resourcemanager', '0 or 1',
rm_count)
hs_count = _get_inst_count(cluster, 'historyserver')
if hs_count not in [0, 1]:
raise ex.InvalidComponentCountException('historyserver', '0 or 1',
hs_count)
nm_count = _get_inst_count(cluster, 'nodemanager')
if rm_count == 0:
if nm_count > 0:
raise ex.RequiredServiceMissingException('resourcemanager',
required_by='nodemanager')
oo_count = _get_inst_count(cluster, 'oozie')
dn_count = _get_inst_count(cluster, 'datanode')
if oo_count not in [0, 1]:
raise ex.InvalidComponentCountException('oozie', '0 or 1', oo_count)
if oo_count == 1:
if dn_count < 1:
raise ex.RequiredServiceMissingException('datanode',
required_by='oozie')
if nm_count < 1:
raise ex.RequiredServiceMissingException('nodemanager',
required_by='oozie')
if hs_count != 1:
raise ex.RequiredServiceMissingException('historyserver',
required_by='oozie')
rep_factor = cu.get_config_value(pctx, 'HDFS', 'dfs.replication', cluster)
if dn_count < rep_factor:
raise ex.InvalidComponentCountException(
            'datanode', rep_factor, dn_count, _('Number of datanodes must not '
                                                'be less than '
                                                'dfs.replication.'))
def validate_additional_ng_scaling(cluster, additional):
rm = vu.get_resourcemanager(cluster)
scalable_processes = _get_scalable_processes()
for ng_id in additional:
ng = gu.get_by_id(cluster.node_groups, ng_id)
if not set(ng.node_processes).issubset(scalable_processes):
msg = _("Vanilla plugin cannot scale nodegroup with processes: %s")
raise ex.NodeGroupCannotBeScaled(ng.name,
msg % ' '.join(ng.node_processes))
if not rm and 'nodemanager' in ng.node_processes:
msg = _("Vanilla plugin cannot scale node group with processes "
"which have no master-processes run in cluster")
raise ex.NodeGroupCannotBeScaled(ng.name, msg)
def validate_existing_ng_scaling(pctx, cluster, existing):
scalable_processes = _get_scalable_processes()
dn_to_delete = 0
for ng in cluster.node_groups:
if ng.id in existing:
if ng.count > existing[ng.id] and "datanode" in ng.node_processes:
dn_to_delete += ng.count - existing[ng.id]
if not set(ng.node_processes).issubset(scalable_processes):
msg = _("Vanilla plugin cannot scale nodegroup "
"with processes: %s")
raise ex.NodeGroupCannotBeScaled(
ng.name, msg % ' '.join(ng.node_processes))
dn_amount = len(vu.get_datanodes(cluster))
rep_factor = cu.get_config_value(pctx, 'HDFS', 'dfs.replication', cluster)
if dn_to_delete > 0 and dn_amount - dn_to_delete < rep_factor:
msg = _("Vanilla plugin cannot shrink cluster because it would be "
"not enough nodes for replicas (replication factor is %s)")
raise ex.ClusterCannotBeScaled(
cluster.name, msg % rep_factor)
def _get_scalable_processes():
return ['datanode', 'nodemanager']
def _get_inst_count(cluster, process):
return sum([ng.count for ng in u.get_node_groups(cluster, process)])
|
apache-2.0
|
christophlsa/odoo
|
addons/mrp/report/mrp_report.py
|
341
|
3839
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields,osv
class report_workcenter_load(osv.osv):
_name="report.workcenter.load"
_description="Work Center Load"
_auto = False
_log_access = False
_columns = {
'name': fields.char('Week', required=True),
'workcenter_id': fields.many2one('mrp.workcenter', 'Work Center', required=True),
'cycle': fields.float('Number of Cycles'),
'hour': fields.float('Number of Hours'),
}
def init(self, cr):
cr.execute("""
create or replace view report_workcenter_load as (
SELECT
min(wl.id) as id,
to_char(p.date_planned,'YYYY:mm:dd') as name,
SUM(wl.hour) AS hour,
SUM(wl.cycle) AS cycle,
wl.workcenter_id as workcenter_id
FROM
mrp_production_workcenter_line wl
LEFT JOIN mrp_production p
ON p.id = wl.production_id
GROUP BY
wl.workcenter_id,
to_char(p.date_planned,'YYYY:mm:dd')
)""")
class report_mrp_inout(osv.osv):
_name="report.mrp.inout"
_description="Stock value variation"
_auto = False
_log_access = False
_rec_name = 'date'
_columns = {
'date': fields.char('Week', required=True),
'value': fields.float('Stock value', required=True, digits=(16,2)),
'company_id': fields.many2one('res.company', 'Company', required=True),
}
def init(self, cr):
cr.execute("""
create or replace view report_mrp_inout as (
select
min(sm.id) as id,
to_char(sm.date,'YYYY:IW') as date,
sum(case when (sl.usage='internal') then
sm.price_unit * sm.product_qty
else
0.0
end - case when (sl2.usage='internal') then
sm.price_unit * sm.product_qty
else
0.0
end) as value,
sm.company_id
from
stock_move sm
left join product_product pp
on (pp.id = sm.product_id)
left join product_template pt
on (pt.id = pp.product_tmpl_id)
left join stock_location sl
on ( sl.id = sm.location_id)
left join stock_location sl2
on ( sl2.id = sm.location_dest_id)
where
sm.state = 'done'
group by
to_char(sm.date,'YYYY:IW'), sm.company_id
)""")
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
snnn/tensorflow
|
tensorflow/python/framework/random_seed_test.py
|
58
|
2522
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.framework.random_seed."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
class RandomSeedTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def testRandomSeed(self):
test_cases = [
# Each test case is a tuple with input to get_seed:
# (input_graph_seed, input_op_seed)
# and output from get_seed:
# (output_graph_seed, output_op_seed)
((None, None), (None, None)),
((None, 1), (random_seed.DEFAULT_GRAPH_SEED, 1)),
((1, 1), (1, 1)),
((0, 0), (0, 2**31 - 1)), # Avoid nondeterministic (0, 0) output
((2**31 - 1, 0), (0, 2**31 - 1)), # Don't wrap to (0, 0) either
((0, 2**31 - 1), (0, 2**31 - 1)), # Wrapping for the other argument
]
if context.executing_eagerly():
# operation seed is random number generated based on global seed.
# it's not tested due to possibility of platform or version difference.
pass
else:
# 0 will be the default_graph._lastid.
test_cases.append(((1, None), (1, 0)))
for tc in test_cases:
tinput, toutput = tc[0], tc[1]
random_seed.set_random_seed(tinput[0])
g_seed, op_seed = random_seed.get_seed(tinput[1])
msg = 'test_case = {0}, got {1}, want {2}'.format(tinput,
(g_seed, op_seed),
toutput)
self.assertEqual((g_seed, op_seed), toutput, msg=msg)
random_seed.set_random_seed(None)
if __name__ == '__main__':
test.main()
|
apache-2.0
|
LecomteEmerick/Essentia-build
|
src/examples/python/streaming_extractor/tuningfrequency.py
|
10
|
3771
|
# Copyright (C) 2006-2013 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
#! /usr/bin/env python
import sys, os
import essentia, essentia.standard, essentia.streaming
from essentia.streaming import *
tonalFrameSize = 4096
tonalHopSize = 2048
class TuningFrequencyExtractor(essentia.streaming.CompositeBase):
def __init__(self, frameSize=tonalFrameSize, hopSize=tonalHopSize):
super(TuningFrequencyExtractor, self).__init__()
fc = FrameCutter(frameSize=frameSize,
hopSize=hopSize,
silentFrames='noise')
w = Windowing(type='blackmanharris62')
spec = Spectrum()
peaks = SpectralPeaks(maxPeaks=10000,
magnitudeThreshold=0.00001,
minFrequency=40,
maxFrequency=5000,
                              orderBy='frequency')
tuning = TuningFrequency()
fc.frame >> w.frame >> spec.frame
spec.spectrum >> peaks.spectrum
peaks.magnitudes >> tuning.magnitudes
peaks.frequencies >> tuning.frequencies
tuning.tuningCents >> None
# define inputs:
self.inputs['signal'] = fc.signal
# define outputs:
self.outputs['tuningFrequency'] = tuning.tuningFrequency
usage = 'tuningfrequency.py [options] <inputfilename> <outputfilename>'
def parse_args():
import numpy
essentia_version = '%s\n'\
'python version: %s\n'\
'numpy version: %s' % (essentia.__version__, # full version
sys.version.split()[0], # python major version
numpy.__version__) # numpy version
from optparse import OptionParser
parser = OptionParser(usage=usage, version=essentia_version)
parser.add_option("-c","--cpp", action="store_true", dest="generate_cpp",
help="generate cpp code from CompositeBase algorithm")
parser.add_option("-d", "--dot", action="store_true", dest="generate_dot",
help="generate dot and cpp code from CompositeBase algorithm")
(options, args) = parser.parse_args()
return options, args
if __name__ == '__main__':
opts, args = parse_args()
if len(args) != 2:
cmd = './'+os.path.basename(sys.argv[0])+ ' -h'
os.system(cmd)
sys.exit(1)
if opts.generate_dot:
essentia.translate(TuningFrequencyExtractor, 'streaming_extractortuningfrequency', dot_graph=True)
elif opts.generate_cpp:
essentia.translate(TuningFrequencyExtractor, 'streaming_extractortuningfrequency', dot_graph=False)
pool = essentia.Pool()
loader = essentia.streaming.MonoLoader(filename=args[0])
tuning = TuningFrequencyExtractor()
loader.audio >> tuning.signal
tuning.tuningFrequency >> (pool, 'tuning_frequency')
essentia.run(loader)
stats = ['mean', 'var', 'min', 'max', 'dmean', 'dmean2', 'dvar', 'dvar2']
poolStats = essentia.standard.PoolAggregator(defaultStats=stats)(pool)
essentia.standard.YamlOutput(filename=args[1])(poolStats)
|
agpl-3.0
|
elkaneb/QCM
|
vendor/doctrine/orm/docs/en/conf.py
|
2448
|
6497
|
# -*- coding: utf-8 -*-
#
# Doctrine 2 ORM documentation build configuration file, created by
# sphinx-quickstart on Fri Dec 3 18:10:24 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath('_exts'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['configurationblock']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Doctrine 2 ORM'
copyright = u'2010-12, Doctrine Project Team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2'
# The full version, including alpha/beta/rc tags.
release = '2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'doctrine'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_theme']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'Doctrine2ORMdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Doctrine2ORM.tex', u'Doctrine 2 ORM Documentation',
u'Doctrine Project Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
primary_domain = "dcorm"
def linkcode_resolve(domain, info):
if domain == 'dcorm':
return 'http://'
return None
|
mit
|
merutak/python-social-auth
|
social/backends/steam.py
|
83
|
1553
|
"""
Steam OpenId backend, docs at:
http://psa.matiasaguirre.net/docs/backends/steam.html
"""
from social.backends.open_id import OpenIdAuth
from social.exceptions import AuthFailed
USER_INFO = 'http://api.steampowered.com/ISteamUser/GetPlayerSummaries/v0002/?'
class SteamOpenId(OpenIdAuth):
name = 'steam'
URL = 'https://steamcommunity.com/openid'
def get_user_id(self, details, response):
"""Return user unique id provided by service"""
return self._user_id(response)
def get_user_details(self, response):
player = self.get_json(USER_INFO, params={
'key': self.setting('API_KEY'),
'steamids': self._user_id(response)
})
if len(player['response']['players']) > 0:
player = player['response']['players'][0]
details = {'username': player.get('personaname'),
'email': '',
'fullname': '',
'first_name': '',
'last_name': '',
'player': player}
else:
details = {}
return details
def consumer(self):
# Steam seems to support stateless mode only, ignore store
if not hasattr(self, '_consumer'):
self._consumer = self.create_consumer()
return self._consumer
def _user_id(self, response):
user_id = response.identity_url.rsplit('/', 1)[-1]
if not user_id.isdigit():
raise AuthFailed(self, 'Missing Steam Id')
return user_id
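    # Illustrative sketch (assumed identity URL shape, not from the backend docs):
    # a response whose identity_url is
    # 'https://steamcommunity.com/openid/id/76561197960435530' yields the user
    # id '76561197960435530'; a non-numeric trailing segment raises AuthFailed.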
|
bsd-3-clause
|
Diegojnb/JdeRobot
|
src/types/python/jderobotTypes/pose3d.py
|
8
|
1480
|
#
# Copyright (C) 1997-2017 JDE Developers Team
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
# Authors :
# Aitor Martinez Fernandez <[email protected]>
#
class Pose3d ():
def __init__(self):
self.x = 0 # X coord [meters]
self.y = 0 # Y coord [meters]
self.z = 0 # Z coord [meters]
self.h = 1 # H param
self.yaw = 0 #Yaw angle[rads]
self.pitch = 0 # Pitch angle[rads]
self.roll = 0 # Roll angle[rads]
self.q = [0,0,0,0] # Quaternion
self.timeStamp = 0 # Time stamp [s]
def __str__(self):
s = "Pose3D: {\n x: " + str(self.x) + "\n Y: " + str(self.y)
s = s + "\n Z: " + str(self.z) + "\n H: " + str(self.h)
s = s + "\n Yaw: " + str(self.yaw) + "\n Pitch: " + str(self.pitch) + "\n Roll: " + str(self.roll)
s = s + "\n quaternion: " + str(self.q) + "\n timeStamp: " + str(self.timeStamp) + "\n}"
return s
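# Minimal usage sketch (illustrative only, not part of the original JdeRobot type):
if __name__ == "__main__":
    demo_pose = Pose3d()
    demo_pose.x, demo_pose.y, demo_pose.z = 1.0, 2.0, 0.5 # meters
    demo_pose.yaw = 0.3 # radians
    print(demo_pose)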
|
gpl-3.0
|
creativcoder/servo
|
tests/wpt/css-tests/tools/html5lib/html5lib/trie/py.py
|
817
|
1763
|
from __future__ import absolute_import, division, unicode_literals
from six import text_type
from bisect import bisect_left
from ._base import Trie as ABCTrie
class Trie(ABCTrie):
def __init__(self, data):
if not all(isinstance(x, text_type) for x in data.keys()):
raise TypeError("All keys must be strings")
self._data = data
self._keys = sorted(data.keys())
self._cachestr = ""
self._cachepoints = (0, len(data))
def __contains__(self, key):
return key in self._data
def __len__(self):
return len(self._data)
def __iter__(self):
return iter(self._data)
def __getitem__(self, key):
return self._data[key]
def keys(self, prefix=None):
if prefix is None or prefix == "" or not self._keys:
return set(self._keys)
if prefix.startswith(self._cachestr):
lo, hi = self._cachepoints
start = i = bisect_left(self._keys, prefix, lo, hi)
else:
start = i = bisect_left(self._keys, prefix)
keys = set()
if start == len(self._keys):
return keys
        while i < len(self._keys) and self._keys[i].startswith(prefix):
keys.add(self._keys[i])
i += 1
self._cachestr = prefix
self._cachepoints = (start, i)
return keys
def has_keys_with_prefix(self, prefix):
if prefix in self._data:
return True
if prefix.startswith(self._cachestr):
lo, hi = self._cachepoints
i = bisect_left(self._keys, prefix, lo, hi)
else:
i = bisect_left(self._keys, prefix)
if i == len(self._keys):
return False
return self._keys[i].startswith(prefix)
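# Minimal usage sketch (illustrative only, not part of the upstream html5lib module):
if __name__ == "__main__":
    trie = Trie({"abc": 1, "abd": 2, "xyz": 3})
    assert trie.keys("ab") == {"abc", "abd"}
    assert trie.has_keys_with_prefix("x")
    assert "abc" in trie and trie["abc"] == 1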
|
mpl-2.0
|
rschnapka/hr
|
__unported__/hr_schedule/wizard/validate_schedule.py
|
28
|
2415
|
# -*- coding:utf-8 -*-
#
#
# Copyright (C) 2013 Michael Telahun Makonnen <[email protected]>.
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
from openerp import netsvc
from openerp.osv import fields, orm
import logging
_logger = logging.getLogger(__name__)
class department_selection(orm.TransientModel):
_name = 'hr.schedule.validate.departments'
_description = 'Department Selection for Validation'
_columns = {
'department_ids': fields.many2many(
'hr.department',
'hr_department_group_rel',
'employee_id',
'department_id',
'Departments',
),
}
def view_schedules(self, cr, uid, ids, context=None):
data = self.read(cr, uid, ids, context=context)[0]
return {
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'hr.schedule',
'domain': [
('department_id', 'in', data['department_ids']),
('state', 'in', ['draft']),
],
'type': 'ir.actions.act_window',
'target': 'new',
'nodestroy': True,
'context': context,
}
def do_validate(self, cr, uid, ids, context=None):
wkf_service = netsvc.LocalService('workflow')
data = self.read(cr, uid, ids, context=context)[0]
sched_ids = self.pool.get('hr.schedule').search(
cr, uid, [
('department_id', 'in', data['department_ids'])
], context=context
)
for sched_id in sched_ids:
wkf_service.trg_validate(
uid, 'hr.schedule', sched_id, 'signal_validate', cr)
return {'type': 'ir.actions.act_window_close'}
|
agpl-3.0
|
landonb/hamster-applet
|
wafadmin/Tools/config_c.py
|
6
|
15279
|
#! /usr/bin/env python
# encoding: utf-8
import sys
if sys.hexversion < 0x020400f0: from sets import Set as set
import os,imp,sys,shlex,shutil
from Utils import md5
import Build,Utils,Configure,Task,Options,Logs,TaskGen
from Constants import*
from Configure import conf,conftest
cfg_ver={'atleast-version':'>=','exact-version':'==','max-version':'<=',}
SNIP1='''
int main() {
void *p;
p=(void*)(%s);
return 0;
}
'''
SNIP2='''
int main() {
if ((%(type_name)s *) 0) return 0;
if (sizeof (%(type_name)s)) return 0;
}
'''
SNIP3='''
int main() {
return 0;
}
'''
def parse_flags(line,uselib,env):
lst=shlex.split(line)
while lst:
x=lst.pop(0)
st=x[:2]
ot=x[2:]
if st=='-I'or st=='/I':
if not ot:ot=lst.pop(0)
env.append_unique('CPPPATH_'+uselib,ot)
elif st=='-D':
if not ot:ot=lst.pop(0)
env.append_unique('CXXDEFINES_'+uselib,ot)
env.append_unique('CCDEFINES_'+uselib,ot)
elif st=='-l':
if not ot:ot=lst.pop(0)
env.append_unique('LIB_'+uselib,ot)
elif st=='-L':
if not ot:ot=lst.pop(0)
env.append_unique('LIBPATH_'+uselib,ot)
elif x=='-pthread'or x.startswith('+'):
env.append_unique('CCFLAGS_'+uselib,x)
env.append_unique('CXXFLAGS_'+uselib,x)
env.append_unique('LINKFLAGS_'+uselib,x)
elif x=='-framework':
env.append_unique('FRAMEWORK_'+uselib,lst.pop(0))
elif x.startswith('-F'):
env.append_unique('FRAMEWORKPATH_'+uselib,x[2:])
elif x.startswith('-std'):
env.append_unique('CCFLAGS_'+uselib,x)
env.append_unique('LINKFLAGS_'+uselib,x)
elif x.startswith('-Wl'):
env.append_unique('LINKFLAGS_'+uselib,x)
elif x.startswith('-m')or x.startswith('-f'):
env.append_unique('CCFLAGS_'+uselib,x)
env.append_unique('CXXFLAGS_'+uselib,x)
def ret_msg(self,f,kw):
if isinstance(f,str):
return f
return f(kw)
def validate_cfg(self,kw):
if not'path'in kw:
kw['path']='pkg-config --errors-to-stdout --print-errors'
if'atleast_pkgconfig_version'in kw:
if not'msg'in kw:
kw['msg']='Checking for pkg-config version >= %s'%kw['atleast_pkgconfig_version']
return
if'modversion'in kw:
return
if'variables'in kw:
if not'msg'in kw:
kw['msg']='Checking for %s variables'%kw['package']
return
for x in cfg_ver.keys():
y=x.replace('-','_')
if y in kw:
if not'package'in kw:
raise ValueError('%s requires a package'%x)
if not'msg'in kw:
kw['msg']='Checking for %s %s %s'%(kw['package'],cfg_ver[x],kw[y])
return
if not'msg'in kw:
kw['msg']='Checking for %s'%(kw['package']or kw['path'])
if not'okmsg'in kw:
kw['okmsg']='yes'
if not'errmsg'in kw:
kw['errmsg']='not found'
def cmd_and_log(self,cmd,kw):
Logs.debug('runner: %s\n'%cmd)
if self.log:
self.log.write('%s\n'%cmd)
try:
p=Utils.pproc.Popen(cmd,stdout=Utils.pproc.PIPE,stderr=Utils.pproc.PIPE,shell=True)
(out,err)=p.communicate()
except OSError,e:
self.log.write('error %r'%e)
self.fatal(str(e))
out=str(out)
err=str(err)
if self.log:
self.log.write(out)
self.log.write(err)
if p.returncode:
if not kw.get('errmsg',''):
if kw.get('mandatory',False):
kw['errmsg']=out.strip()
else:
kw['errmsg']='no'
self.fatal('fail')
return out
def exec_cfg(self,kw):
if'atleast_pkgconfig_version'in kw:
cmd='%s --atleast-pkgconfig-version=%s'%(kw['path'],kw['atleast_pkgconfig_version'])
self.cmd_and_log(cmd,kw)
if not'okmsg'in kw:
kw['okmsg']='yes'
return
for x in cfg_ver:
y=x.replace('-','_')
if y in kw:
self.cmd_and_log('%s --%s=%s %s'%(kw['path'],x,kw[y],kw['package']),kw)
if not'okmsg'in kw:
kw['okmsg']='yes'
self.define(self.have_define(kw.get('uselib_store',kw['package'])),1,0)
break
if'modversion'in kw:
version=self.cmd_and_log('%s --modversion %s'%(kw['path'],kw['modversion']),kw).strip()
self.define('%s_VERSION'%Utils.quote_define_name(kw.get('uselib_store',kw['modversion'])),version)
return version
if'variables'in kw:
env=kw.get('env',self.env)
uselib=kw.get('uselib_store',kw['package'].upper())
vars=Utils.to_list(kw['variables'])
for v in vars:
val=self.cmd_and_log('%s --variable=%s %s'%(kw['path'],v,kw['package']),kw).strip()
var='%s_%s'%(uselib,v)
env[var]=val
if not'okmsg'in kw:
kw['okmsg']='yes'
return
lst=[kw['path']]
defi=kw.get('define_variable',None)
if not defi:
defi=self.env.PKG_CONFIG_DEFINES or{}
for key,val in defi.iteritems():
lst.append('--define-variable=%s=%s'%(key,val))
lst.append(kw.get('args',''))
lst.append(kw['package'])
cmd=' '.join(lst)
ret=self.cmd_and_log(cmd,kw)
if not'okmsg'in kw:
kw['okmsg']='yes'
self.define(self.have_define(kw.get('uselib_store',kw['package'])),1,0)
parse_flags(ret,kw.get('uselib_store',kw['package'].upper()),kw.get('env',self.env))
return ret
def check_cfg(self,*k,**kw):
self.validate_cfg(kw)
if'msg'in kw:
self.check_message_1(kw['msg'])
ret=None
try:
ret=self.exec_cfg(kw)
except Configure.ConfigurationError,e:
if'errmsg'in kw:
self.check_message_2(kw['errmsg'],'YELLOW')
if'mandatory'in kw and kw['mandatory']:
if Logs.verbose>1:
raise
else:
self.fatal('the configuration failed (see %r)'%self.log.name)
else:
kw['success']=ret
if'okmsg'in kw:
self.check_message_2(self.ret_msg(kw['okmsg'],kw))
return ret
def validate_c(self,kw):
if not'env'in kw:
kw['env']=self.env.copy()
env=kw['env']
if not'compiler'in kw:
kw['compiler']='cc'
if env['CXX_NAME']and Task.TaskBase.classes.get('cxx',None):
kw['compiler']='cxx'
if not self.env['CXX']:
self.fatal('a c++ compiler is required')
else:
if not self.env['CC']:
self.fatal('a c compiler is required')
if not'type'in kw:
kw['type']='cprogram'
assert not(kw['type']!='cprogram'and kw.get('execute',0)),'can only execute programs'
def to_header(dct):
if'header_name'in dct:
dct=Utils.to_list(dct['header_name'])
return''.join(['#include <%s>\n'%x for x in dct])
return''
if not'compile_mode'in kw:
kw['compile_mode']=(kw['compiler']=='cxx')and'cxx'or'cc'
if not'compile_filename'in kw:
kw['compile_filename']='test.c'+((kw['compile_mode']=='cxx')and'pp'or'')
if'framework_name'in kw:
try:TaskGen.task_gen.create_task_macapp
except AttributeError:self.fatal('frameworks require the osx tool')
fwkname=kw['framework_name']
if not'uselib_store'in kw:
kw['uselib_store']=fwkname.upper()
if not kw.get('no_header',False):
if not'header_name'in kw:
kw['header_name']=[]
fwk='%s/%s.h'%(fwkname,fwkname)
if kw.get('remove_dot_h',None):
fwk=fwk[:-2]
kw['header_name']=Utils.to_list(kw['header_name'])+[fwk]
kw['msg']='Checking for framework %s'%fwkname
kw['framework']=fwkname
if'function_name'in kw:
fu=kw['function_name']
if not'msg'in kw:
kw['msg']='Checking for function %s'%fu
kw['code']=to_header(kw)+SNIP1%fu
if not'uselib_store'in kw:
kw['uselib_store']=fu.upper()
if not'define_name'in kw:
kw['define_name']=self.have_define(fu)
elif'type_name'in kw:
tu=kw['type_name']
if not'msg'in kw:
kw['msg']='Checking for type %s'%tu
if not'header_name'in kw:
kw['header_name']='stdint.h'
kw['code']=to_header(kw)+SNIP2%{'type_name':tu}
if not'define_name'in kw:
kw['define_name']=self.have_define(tu.upper())
elif'header_name'in kw:
if not'msg'in kw:
kw['msg']='Checking for header %s'%kw['header_name']
l=Utils.to_list(kw['header_name'])
assert len(l)>0,'list of headers in header_name is empty'
kw['code']=to_header(kw)+SNIP3
if not'uselib_store'in kw:
kw['uselib_store']=l[0].upper()
if not'define_name'in kw:
kw['define_name']=self.have_define(l[0])
if'lib'in kw:
if not'msg'in kw:
kw['msg']='Checking for library %s'%kw['lib']
if not'uselib_store'in kw:
kw['uselib_store']=kw['lib'].upper()
if'staticlib'in kw:
if not'msg'in kw:
kw['msg']='Checking for static library %s'%kw['staticlib']
if not'uselib_store'in kw:
kw['uselib_store']=kw['staticlib'].upper()
if'fragment'in kw:
kw['code']=kw['fragment']
if not'msg'in kw:
kw['msg']='Checking for custom code'
if not'errmsg'in kw:
kw['errmsg']='no'
for(flagsname,flagstype)in[('cxxflags','compiler'),('cflags','compiler'),('linkflags','linker')]:
if flagsname in kw:
if not'msg'in kw:
kw['msg']='Checking for %s flags %s'%(flagstype,kw[flagsname])
if not'errmsg'in kw:
kw['errmsg']='no'
if not'execute'in kw:
kw['execute']=False
if not'errmsg'in kw:
kw['errmsg']='not found'
if not'okmsg'in kw:
kw['okmsg']='yes'
if not'code'in kw:
kw['code']=SNIP3
if not kw.get('success'):kw['success']=None
assert'msg'in kw,'invalid parameters, read http://freehackers.org/~tnagy/wafbook/single.html#config_helpers_c'
def post_check(self,*k,**kw):
is_success=False
if kw['execute']:
if kw['success']is not None:
is_success=True
else:
is_success=(kw['success']==0)
if'define_name'in kw:
if'header_name'in kw or'function_name'in kw or'type_name'in kw or'fragment'in kw:
if kw['execute']:
key=kw['success']
if isinstance(key,str):
if key:
self.define(kw['define_name'],key,quote=kw.get('quote',1))
else:
self.define_cond(kw['define_name'],True)
else:
self.define_cond(kw['define_name'],False)
else:
self.define_cond(kw['define_name'],is_success)
if is_success and'uselib_store'in kw:
import cc,cxx
for k in set(cc.g_cc_flag_vars).union(cxx.g_cxx_flag_vars):
lk=k.lower()
if k=='CPPPATH':lk='includes'
if k=='CXXDEFINES':lk='defines'
if k=='CCDEFINES':lk='defines'
if lk in kw:
val=kw[lk]
if isinstance(val,str):
val=val.rstrip(os.path.sep)
self.env.append_unique(k+'_'+kw['uselib_store'],val)
def check(self,*k,**kw):
self.validate_c(kw)
self.check_message_1(kw['msg'])
ret=None
try:
ret=self.run_c_code(*k,**kw)
except Configure.ConfigurationError,e:
self.check_message_2(kw['errmsg'],'YELLOW')
if'mandatory'in kw and kw['mandatory']:
if Logs.verbose>1:
raise
else:
self.fatal('the configuration failed (see %r)'%self.log.name)
else:
kw['success']=ret
self.check_message_2(self.ret_msg(kw['okmsg'],kw))
self.post_check(*k,**kw)
if not kw.get('execute',False):
return ret==0
return ret
def run_c_code(self,*k,**kw):
test_f_name=kw['compile_filename']
k=0
while k<10000:
dir=os.path.join(self.blddir,'.conf_check_%d'%k)
try:
shutil.rmtree(dir)
except OSError:
pass
try:
os.stat(dir)
except OSError:
break
k+=1
try:
os.makedirs(dir)
except:
self.fatal('cannot create a configuration test folder %r'%dir)
try:
os.stat(dir)
except:
self.fatal('cannot use the configuration test folder %r'%dir)
bdir=os.path.join(dir,'testbuild')
if not os.path.exists(bdir):
os.makedirs(bdir)
env=kw['env']
dest=open(os.path.join(dir,test_f_name),'w')
dest.write(kw['code'])
dest.close()
back=os.path.abspath('.')
bld=Build.BuildContext()
bld.log=self.log
bld.all_envs.update(self.all_envs)
bld.all_envs['default']=env
bld.lst_variants=bld.all_envs.keys()
bld.load_dirs(dir,bdir)
os.chdir(dir)
bld.rescan(bld.srcnode)
if not'features'in kw:
kw['features']=[kw['compile_mode'],kw['type']]
o=bld(features=kw['features'],source=test_f_name,target='testprog')
for k,v in kw.iteritems():
setattr(o,k,v)
self.log.write("==>\n%s\n<==\n"%kw['code'])
try:
bld.compile()
except Utils.WafError:
ret=Utils.ex_stack()
else:
ret=0
os.chdir(back)
if ret:
self.log.write('command returned %r'%ret)
self.fatal(str(ret))
if kw['execute']:
lastprog=o.link_task.outputs[0].abspath(env)
args=Utils.to_list(kw.get('exec_args',[]))
proc=Utils.pproc.Popen([lastprog]+args,stdout=Utils.pproc.PIPE,stderr=Utils.pproc.PIPE)
(out,err)=proc.communicate()
w=self.log.write
w(str(out))
w('\n')
w(str(err))
w('\n')
w('returncode %r'%proc.returncode)
w('\n')
if proc.returncode:
self.fatal(Utils.ex_stack())
ret=out
return ret
def check_cxx(self,*k,**kw):
kw['compiler']='cxx'
return self.check(*k,**kw)
def check_cc(self,*k,**kw):
kw['compiler']='cc'
return self.check(*k,**kw)
def define(self,define,value,quote=1):
assert define and isinstance(define,str)
tbl=self.env[DEFINES]or Utils.ordered_dict()
if isinstance(value,str):
if quote:
tbl[define]='"%s"'%repr('"'+value)[2:-1].replace('"','\\"')
else:
tbl[define]=value
elif isinstance(value,int):
tbl[define]=value
else:
raise TypeError('define %r -> %r must be a string or an int'%(define,value))
self.env[DEFINES]=tbl
self.env[define]=value
def undefine(self,define):
assert define and isinstance(define,str)
tbl=self.env[DEFINES]or Utils.ordered_dict()
value=UNDEFINED
tbl[define]=value
self.env[DEFINES]=tbl
self.env[define]=value
def define_cond(self,name,value):
if value:
self.define(name,1)
else:
self.undefine(name)
def is_defined(self,key):
defines=self.env[DEFINES]
if not defines:
return False
try:
value=defines[key]
except KeyError:
return False
else:
return value!=UNDEFINED
def get_define(self,define):
try:return self.env[DEFINES][define]
except KeyError:return None
def have_define(self,name):
return self.__dict__.get('HAVE_PAT','HAVE_%s')%Utils.quote_define_name(name)
def write_config_header(self,configfile='',env='',guard='',top=False):
if not configfile:configfile=WAF_CONFIG_H
waf_guard=guard or'_%s_WAF'%Utils.quote_define_name(configfile)
if not env:env=self.env
if top:
diff=''
else:
diff=Utils.diff_path(self.srcdir,self.curdir)
full=os.sep.join([self.blddir,env.variant(),diff,configfile])
full=os.path.normpath(full)
(dir,base)=os.path.split(full)
try:os.makedirs(dir)
except:pass
dest=open(full,'w')
dest.write('/* Configuration header created by Waf - do not edit */\n')
dest.write('#ifndef %s\n#define %s\n\n'%(waf_guard,waf_guard))
dest.write(self.get_config_header())
env.append_unique(CFG_FILES,os.path.join(diff,configfile))
dest.write('\n#endif /* %s */\n'%waf_guard)
dest.close()
def get_config_header(self):
config_header=[]
tbl=self.env[DEFINES]or Utils.ordered_dict()
for key in tbl.allkeys:
value=tbl[key]
if value is None:
config_header.append('#define %s'%key)
elif value is UNDEFINED:
config_header.append('/* #undef %s */'%key)
else:
config_header.append('#define %s %s'%(key,value))
return"\n".join(config_header)
def find_cpp(conf):
v=conf.env
cpp=None
if v['CPP']:cpp=v['CPP']
elif'CPP'in conf.environ:cpp=conf.environ['CPP']
if not cpp:cpp=conf.find_program('cpp',var='CPP')
if not cpp:cpp=v['CC']
if not cpp:cpp=v['CXX']
v['CPP']=cpp
def cc_add_flags(conf):
conf.add_os_flags('CFLAGS','CCFLAGS')
conf.add_os_flags('CPPFLAGS')
def cxx_add_flags(conf):
conf.add_os_flags('CXXFLAGS')
conf.add_os_flags('CPPFLAGS')
def link_add_flags(conf):
conf.add_os_flags('LINKFLAGS')
conf.add_os_flags('LDFLAGS','LINKFLAGS')
def cc_load_tools(conf):
conf.check_tool('cc')
def cxx_load_tools(conf):
conf.check_tool('cxx')
conf(ret_msg)
conf(validate_cfg)
conf(cmd_and_log)
conf(exec_cfg)
conf(check_cfg)
conf(validate_c)
conf(post_check)
conf(check)
conf(run_c_code)
conf(check_cxx)
conf(check_cc)
conf(define)
conf(undefine)
conf(define_cond)
conf(is_defined)
conf(get_define)
conf(have_define)
conf(write_config_header)
conf(get_config_header)
conftest(find_cpp)
conftest(cc_add_flags)
conftest(cxx_add_flags)
conftest(link_add_flags)
conftest(cc_load_tools)
conftest(cxx_load_tools)
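# Hedged usage sketch (not part of the original waf module): roughly how a
# wscript configure() function might call the helpers above; the package and
# header names are only illustrative.
def _example_configure(conf):
	conf.check_tool('gcc')
	conf.check_cfg(package='glib-2.0',args='--cflags --libs',uselib_store='GLIB',mandatory=False)
	conf.check_cc(header_name='stdio.h')
	conf.define('EXAMPLE_ANSWER',42)
	conf.write_config_header('config.h')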
|
gpl-3.0
|
lukeiwanski/tensorflow-opencl
|
tensorflow/python/kernel_tests/topk_op_test.py
|
58
|
3911
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TopK op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class TopKTest(test.TestCase):
def _validateTopK(self,
inputs,
k,
expected_values,
expected_indices,
sorted=True):
np_values = np.array(expected_values)
np_indices = np.array(expected_indices)
with self.test_session():
values_op, indices_op = nn_ops.top_k(inputs, k, sorted=sorted)
values = values_op.eval()
indices = indices_op.eval()
self.assertAllClose(np_values, values)
self.assertAllEqual(np_indices, indices)
self.assertShapeEqual(np_values, values_op)
self.assertShapeEqual(np_indices, indices_op)
def testTop1(self):
inputs = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.3, 0.3, 0.2]]
self._validateTopK(inputs, 1, [[0.4], [0.3]], [[3], [1]])
def testTop2(self):
inputs = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.3, 0.3, 0.2]]
self._validateTopK(inputs, 2, [[0.4, 0.3], [0.3, 0.3]], [[3, 1], [1, 2]])
def testTopAll(self):
inputs = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.3, 0.3, 0.2]]
self._validateTopK(inputs, 4, [[0.4, 0.3, 0.2, 0.1], [0.3, 0.3, 0.2, 0.1]],
[[3, 1, 2, 0], [1, 2, 3, 0]])
def testTop3Unsorted(self):
inputs = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.3, 0.3, 0.2]]
self._validateTopK(
inputs,
3, [[0.2, 0.3, 0.4], [0.2, 0.3, 0.3]], [[2, 1, 3], [3, 1, 2]],
sorted=False)
def testTop3Vector(self):
inputs = [3, 6, 15, 18, 6, 12, 1, 17, 3, 0, 4, 19, 1, 6]
self._validateTopK(inputs, 3, [19, 18, 17], [11, 3, 7])
def testTensorK(self):
inputs = [3, 6, 15, 18, 6, 12, 1, 17, 3, 0, 4, 19, 1, 6]
k = constant_op.constant(3)
self._validateTopK(inputs, k, [19, 18, 17], [11, 3, 7])
def testKNegative(self):
inputs = [[0.1, 0.2], [0.3, 0.4]]
with self.test_session():
k = array_ops.placeholder(dtypes.int32)
values, _ = nn_ops.top_k(inputs, k)
with self.assertRaisesOpError("Need k >= 0, got -7"):
values.eval(feed_dict={k: -7})
def testKTooLarge(self):
inputs = [[0.1, 0.2], [0.3, 0.4]]
with self.assertRaisesRegexp(ValueError,
r"must have last dimension >= k = 4"):
nn_ops.top_k(inputs, 4)
def testTopKGradients(self):
with self.test_session() as sess:
inputs = array_ops.placeholder(dtypes.int32, shape=[2, 5])
values, _ = nn_ops.top_k(inputs, 3)
grad = sess.run(
gradients_impl.gradients(
values, inputs, grad_ys=[[[1, 2, 3], [4, 5, 6]]]),
feed_dict={inputs: [[2, -1, 1000, 3, 4], [1, 5, 2, 4, 3]]})[0]
self.assertEqual(grad.tolist(), [[0, 0, 1, 3, 2], [0, 4, 0, 5, 6]])
if __name__ == "__main__":
test.main()
|
apache-2.0
|
timj/scons
|
src/engine/SCons/Tool/cyglink.py
|
6
|
8493
|
"""SCons.Tool.cyglink
Customization of gnulink for Cygwin (http://www.cygwin.com/)
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
from __future__ import absolute_import, print_function
import re
import os
import SCons.Action
import SCons.Util
import SCons.Tool
#MAYBE: from . import gnulink
from . import gnulink
from . import link
def _lib_generator(target, source, env, for_signature, **kw):
try: cmd = kw['cmd']
except KeyError: cmd = SCons.Util.CLVar(['$SHLINK'])
try: vp = kw['varprefix']
except KeyError: vp = 'SHLIB'
dll = env.FindIxes(target, '%sPREFIX' % vp, '%sSUFFIX' % vp)
if dll: cmd.extend(['-o', dll])
cmd.extend(['$SHLINKFLAGS', '$__%sVERSIONFLAGS' % vp, '$__RPATH'])
implib = env.FindIxes(target, 'IMPLIBPREFIX', 'IMPLIBSUFFIX')
if implib:
cmd.extend([
'-Wl,--out-implib='+implib.get_string(for_signature),
'-Wl,--export-all-symbols',
'-Wl,--enable-auto-import',
'-Wl,--whole-archive', '$SOURCES',
'-Wl,--no-whole-archive', '$_LIBDIRFLAGS', '$_LIBFLAGS'
])
else:
cmd.extend(['$SOURCES', '$_LIBDIRFLAGS', '$_LIBFLAGS'])
return [cmd]
def shlib_generator(target, source, env, for_signature):
return _lib_generator(target, source, env, for_signature,
varprefix='SHLIB',
cmd = SCons.Util.CLVar(['$SHLINK']))
def ldmod_generator(target, source, env, for_signature):
return _lib_generator(target, source, env, for_signature,
varprefix='LDMODULE',
cmd = SCons.Util.CLVar(['$LDMODULE']))
def _lib_emitter(target, source, env, **kw):
Verbose = False
if Verbose:
print("_lib_emitter: target[0]=%r" % target[0].get_path())
try: vp = kw['varprefix']
except KeyError: vp = 'SHLIB'
try: libtype = kw['libtype']
except KeyError: libtype = 'ShLib'
dll = env.FindIxes(target, '%sPREFIX' % vp, '%sSUFFIX' % vp)
no_import_lib = env.get('no_import_lib', 0)
if Verbose:
print("_lib_emitter: dll=%r" % dll.get_path())
if not dll or len(target) > 1:
raise SCons.Errors.UserError("A shared library should have exactly one target with the suffix: %s" % env.subst("$%sSUFFIX" % vp))
# Remove any "lib" after the prefix
pre = env.subst('$%sPREFIX' % vp)
if dll.name[len(pre):len(pre)+3] == 'lib':
dll.name = pre + dll.name[len(pre)+3:]
if Verbose:
print("_lib_emitter: dll.name=%r" % dll.name)
orig_target = target
target = [env.fs.File(dll)]
target[0].attributes.shared = 1
if Verbose:
print("_lib_emitter: after target=[env.fs.File(dll)]: target[0]=%r" % target[0].get_path())
# Append an import lib target
if not no_import_lib:
# Create list of target libraries as strings
target_strings = env.ReplaceIxes(orig_target[0],
'%sPREFIX' % vp, '%sSUFFIX' % vp,
'IMPLIBPREFIX', 'IMPLIBSUFFIX')
if Verbose:
print("_lib_emitter: target_strings=%r" % target_strings)
implib_target = env.fs.File(target_strings)
if Verbose:
print("_lib_emitter: implib_target=%r" % implib_target.get_path())
implib_target.attributes.shared = 1
target.append(implib_target)
symlinks = SCons.Tool.ImpLibSymlinkGenerator(env, implib_target,
implib_libtype=libtype,
generator_libtype=libtype+'ImpLib')
if Verbose:
print("_lib_emitter: implib symlinks=%r" % SCons.Tool.StringizeLibSymlinks(symlinks))
if symlinks:
SCons.Tool.EmitLibSymlinks(env, symlinks, implib_target, clean_targets = target[0])
implib_target.attributes.shliblinks = symlinks
return (target, source)
def shlib_emitter(target, source, env):
return _lib_emitter(target, source, env, varprefix='SHLIB', libtype='ShLib')
def ldmod_emitter(target, source, env):
return _lib_emitter(target, source, env, varprefix='LDMODULE', libtype='LdMod')
def _versioned_lib_suffix(env, suffix, version):
"""Generate versioned shared library suffix from a unversioned one.
If suffix='.dll', and version='0.1.2', then it returns '-0-1-2.dll'"""
Verbose = False
if Verbose:
print("_versioned_lib_suffix: suffix= ", suffix)
print("_versioned_lib_suffix: version= ", version)
cygversion = re.sub('\.', '-', version)
if not suffix.startswith('-' + cygversion):
suffix = '-' + cygversion + suffix
if Verbose:
print("_versioned_lib_suffix: return suffix= ", suffix)
return suffix
def _versioned_implib_name(env, libnode, version, prefix, suffix, **kw):
return link._versioned_lib_name(env, libnode, version, prefix, suffix,
SCons.Tool.ImpLibPrefixGenerator,
SCons.Tool.ImpLibSuffixGenerator,
implib_libtype=kw['libtype'])
def _versioned_implib_symlinks(env, libnode, version, prefix, suffix, **kw):
"""Generate link names that should be created for a versioned shared library.
Returns a list in the form [ (link, linktarget), ... ]
"""
Verbose = False
if Verbose:
print("_versioned_implib_symlinks: libnode=%r" % libnode.get_path())
print("_versioned_implib_symlinks: version=%r" % version)
try: libtype = kw['libtype']
except KeyError: libtype = 'ShLib'
linkdir = os.path.dirname(libnode.get_path())
if Verbose:
print("_versioned_implib_symlinks: linkdir=%r" % linkdir)
name = SCons.Tool.ImpLibNameGenerator(env, libnode,
implib_libtype=libtype,
generator_libtype=libtype+'ImpLib')
if Verbose:
print("_versioned_implib_symlinks: name=%r" % name)
major = version.split('.')[0]
link0 = env.fs.File(os.path.join(linkdir, name))
symlinks = [(link0, libnode)]
if Verbose:
print("_versioned_implib_symlinks: return symlinks=%r" % SCons.Tool.StringizeLibSymlinks(symlinks))
return symlinks
shlib_action = SCons.Action.Action(shlib_generator, generator=1)
ldmod_action = SCons.Action.Action(ldmod_generator, generator=1)
def generate(env):
"""Add Builders and construction variables for cyglink to an Environment."""
gnulink.generate(env)
env['LINKFLAGS'] = SCons.Util.CLVar('-Wl,-no-undefined')
env['SHLINKCOM'] = shlib_action
env['LDMODULECOM'] = ldmod_action
env.Append(SHLIBEMITTER = [shlib_emitter])
env.Append(LDMODULEEMITTER = [ldmod_emitter])
env['SHLIBPREFIX'] = 'cyg'
env['SHLIBSUFFIX'] = '.dll'
env['IMPLIBPREFIX'] = 'lib'
env['IMPLIBSUFFIX'] = '.dll.a'
# Variables used by versioned shared libraries
env['_SHLIBVERSIONFLAGS'] = '$SHLIBVERSIONFLAGS'
env['_LDMODULEVERSIONFLAGS'] = '$LDMODULEVERSIONFLAGS'
# SHLIBVERSIONFLAGS and LDMODULEVERSIONFLAGS are same as in gnulink...
# LINKCALLBACKS are NOT inherited from gnulink
env['LINKCALLBACKS'] = {
'VersionedShLibSuffix' : _versioned_lib_suffix,
'VersionedLdModSuffix' : _versioned_lib_suffix,
'VersionedImpLibSuffix' : _versioned_lib_suffix,
'VersionedShLibName' : link._versioned_shlib_name,
'VersionedLdModName' : link._versioned_ldmod_name,
'VersionedShLibImpLibName' : lambda *args: _versioned_implib_name(*args, libtype='ShLib'),
'VersionedLdModImpLibName' : lambda *args: _versioned_implib_name(*args, libtype='LdMod'),
'VersionedShLibImpLibSymlinks' : lambda *args: _versioned_implib_symlinks(*args, libtype='ShLib'),
'VersionedLdModImpLibSymlinks' : lambda *args: _versioned_implib_symlinks(*args, libtype='LdMod'),
}
# these variables were set by gnulink but are not used in cyglink
try: del env['_SHLIBSONAME']
except KeyError: pass
try: del env['_LDMODULESONAME']
except KeyError: pass
def exists(env):
return gnulink.exists(env)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
mit
|
jeyraof/python-social-auth
|
social/apps/django_app/middleware.py
|
70
|
2285
|
# -*- coding: utf-8 -*-
import six
from django.conf import settings
from django.contrib import messages
from django.contrib.messages.api import MessageFailure
from django.shortcuts import redirect
from django.utils.http import urlquote
from social.exceptions import SocialAuthBaseException
from social.utils import social_logger
class SocialAuthExceptionMiddleware(object):
"""Middleware that handles Social Auth AuthExceptions by providing the user
with a message, logging an error, and redirecting to some next location.
By default, the exception message itself is sent to the user and they are
redirected to the location specified in the SOCIAL_AUTH_LOGIN_ERROR_URL
setting.
This middleware can be extended by overriding the get_message or
get_redirect_uri methods, which each accept request and exception.
"""
def process_exception(self, request, exception):
strategy = getattr(request, 'social_strategy', None)
if strategy is None or self.raise_exception(request, exception):
return
if isinstance(exception, SocialAuthBaseException):
backend = getattr(request, 'backend', None)
backend_name = getattr(backend, 'name', 'unknown-backend')
message = self.get_message(request, exception)
social_logger.error(message)
url = self.get_redirect_uri(request, exception)
try:
messages.error(request, message,
extra_tags='social-auth ' + backend_name)
except MessageFailure:
url += ('?' in url and '&' or '?') + \
'message={0}&backend={1}'.format(urlquote(message),
backend_name)
return redirect(url)
def raise_exception(self, request, exception):
strategy = getattr(request, 'social_strategy', None)
if strategy is not None:
return strategy.setting('RAISE_EXCEPTIONS', settings.DEBUG)
def get_message(self, request, exception):
return six.text_type(exception)
def get_redirect_uri(self, request, exception):
strategy = getattr(request, 'social_strategy', None)
return strategy.setting('LOGIN_ERROR_URL')
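# Hypothetical extension sketch (illustrative only): as the class docstring
# notes, get_message and get_redirect_uri can be overridden; the message text
# and URL below are made-up examples.
class ExampleSocialAuthExceptionMiddleware(SocialAuthExceptionMiddleware):
    def get_message(self, request, exception):
        return 'Login failed: {0}'.format(six.text_type(exception))
    def get_redirect_uri(self, request, exception):
        return '/login-error/'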
|
bsd-3-clause
|
rubikloud/scikit-learn
|
sklearn/datasets/california_housing.py
|
3
|
3926
|
"""California housing dataset.
The original database is available from StatLib
http://lib.stat.cmu.edu/
The data contains 20,640 observations on 9 variables.
This dataset contains the average house value as target variable
and the following input variables (features): average income,
housing average age, average rooms, average bedrooms, population,
average occupation, latitude, and longitude in that order.
References
----------
Pace, R. Kelley and Ronald Barry, Sparse Spatial Autoregressions,
Statistics and Probability Letters, 33 (1997) 291-297.
"""
# Authors: Peter Prettenhofer
# License: BSD 3 clause
from io import BytesIO
import os
from os.path import exists
from os import makedirs
import tarfile
try:
# Python 2
from urllib2 import urlopen
except ImportError:
# Python 3+
from urllib.request import urlopen
import numpy as np
from .base import get_data_home, Bunch
from .base import _pkl_filepath
from ..externals import joblib
DATA_URL = "http://www.dcc.fc.up.pt/~ltorgo/Regression/cal_housing.tgz"
TARGET_FILENAME = "cal_housing.pkz"
# Grab the module-level docstring to use as a description of the
# dataset
MODULE_DOCS = __doc__
def fetch_california_housing(data_home=None, download_if_missing=True):
"""Loader for the California housing dataset from StatLib.
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
download_if_missing: optional, True by default
        If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
dataset : dict-like object with the following attributes:
dataset.data : ndarray, shape [20640, 8]
Each row corresponding to the 8 feature values in order.
dataset.target : numpy array of shape (20640,)
Each value corresponds to the average house value in units of 100,000.
dataset.feature_names : array of length 8
Array of ordered feature names used in the dataset.
dataset.DESCR : string
Description of the California housing dataset.
Notes
------
This dataset consists of 20,640 samples and 9 features.
"""
data_home = get_data_home(data_home=data_home)
if not exists(data_home):
makedirs(data_home)
filepath = _pkl_filepath(data_home, TARGET_FILENAME)
if not exists(filepath):
print('downloading Cal. housing from %s to %s' % (DATA_URL, data_home))
archive_fileobj = BytesIO(urlopen(DATA_URL).read())
fileobj = tarfile.open(
mode="r:gz",
fileobj=archive_fileobj).extractfile(
'CaliforniaHousing/cal_housing.data')
cal_housing = np.loadtxt(fileobj, delimiter=',')
# Columns are not in the same order compared to the previous
# URL resource on lib.stat.cmu.edu
columns_index = [8, 7, 2, 3, 4, 5, 6, 1, 0]
cal_housing = cal_housing[:, columns_index]
joblib.dump(cal_housing, filepath, compress=6)
else:
cal_housing = joblib.load(filepath)
feature_names = ["MedInc", "HouseAge", "AveRooms", "AveBedrms",
"Population", "AveOccup", "Latitude", "Longitude"]
target, data = cal_housing[:, 0], cal_housing[:, 1:]
# avg rooms = total rooms / households
data[:, 2] /= data[:, 5]
# avg bed rooms = total bed rooms / households
data[:, 3] /= data[:, 5]
    # avg occupancy = population / households
data[:, 5] = data[:, 4] / data[:, 5]
# target in units of 100,000
target = target / 100000.0
return Bunch(data=data,
target=target,
feature_names=feature_names,
DESCR=MODULE_DOCS)
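# Hedged usage sketch (illustrative, not part of the original module); note the
# first call downloads the archive from DATA_URL and caches it under data_home.
if __name__ == "__main__":
    housing = fetch_california_housing()
    print(housing.data.shape)  # expected: (20640, 8)
    print(housing.feature_names)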
|
bsd-3-clause
|
benjello/openfisca-france
|
openfisca_france/scripts/calculateur_impots/extraction_champs_impots.py
|
3
|
3057
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Use finances.gouv.fr web simulator as an API to compute income taxes."""
import argparse
import collections
import logging
import os
import sys
import urllib2
from lxml import etree
app_name = os.path.splitext(os.path.basename(__file__))[0]
log = logging.getLogger(app_name)
def main():
parser = argparse.ArgumentParser(description = __doc__)
parser.add_argument('-v', '--verbose', action = 'store_true', default = False, help = "increase output verbosity")
args = parser.parse_args()
logging.basicConfig(level = logging.DEBUG if args.verbose else logging.WARNING, stream = sys.stdout)
request = urllib2.Request('http://www3.finances.gouv.fr/calcul_impot/2014/simplifie/calc_s_data.htm', headers = {
'User-Agent': 'OpenFisca-Script',
})
response = urllib2.urlopen(request)
page_doc = etree.parse(response, etree.HTMLParser())
fields = collections.OrderedDict()
for element in page_doc.xpath('//*[@name]'):
tag = element.tag.lower()
if tag in ('a', 'form'):
continue
assert tag == 'input', tag
# field = collections.OrderedDict(
# (attribute_name, attribute_value)
# for attribute_name, attribute_value in attributes.iteritems()
# if attribute_value
# )
attributes = element.attrib
name = attributes['name']
type = attributes['type']
if type in ('checkbox', 'radio'):
existing_field = fields.get(name)
if existing_field is None:
field = collections.OrderedDict(
(attribute_name, attribute_value)
for attribute_name, attribute_value in attributes.iteritems()
if attribute_value
)
if field.pop('checked', False):
field['default'] = field['value']
field['values'] = [field.pop('value')]
fields[name] = field
else:
value = attributes['value']
for attribute_name, attribute_value in attributes.iteritems():
if attribute_name == 'value':
continue
if attribute_name == 'checked':
existing_field['default'] = value
continue
assert existing_field.get(attribute_name) == attribute_value, etree.tostring(element)
existing_field['values'].append(value)
else:
assert type in ('hidden', 'text'), type
assert name not in fields, name
fields[name] = collections.OrderedDict(
(attribute_name, attribute_value)
for attribute_name, attribute_value in attributes.iteritems()
if attribute_value
)
import json
print json.dumps(fields, encoding = 'utf-8', ensure_ascii = False, indent = 2)
return 0
if __name__ == "__main__":
sys.exit(main())
|
agpl-3.0
|
4rado/RepositoryForProject
|
Lib/site-packages/scipy/fftpack/setup.py
|
51
|
1461
|
#!"C:\Users\hog\Documents\Visual Studio 2010\Projects\ArdupilotMega\ArdupilotMega\bin\Debug\ipy.exe"
# Created by Pearu Peterson, August 2002
from os.path import join
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('fftpack',parent_package, top_path)
config.add_data_dir('tests')
config.add_data_dir('benchmarks')
config.add_library('dfftpack',
sources=[join('src/dfftpack','*.f')])
config.add_library('fftpack',
sources=[join('src/fftpack','*.f')])
sources = ['fftpack.pyf','src/zfft.c','src/drfft.c','src/zrfft.c',
'src/zfftnd.c', 'src/dct.c.src']
config.add_extension('_fftpack',
sources=sources,
libraries=['dfftpack', 'fftpack'],
include_dirs=['src'])
config.add_extension('convolve',
sources=['convolve.pyf','src/convolve.c'],
libraries=['dfftpack'],
)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
from fftpack_version import fftpack_version
setup(version=fftpack_version,
description='fftpack - Discrete Fourier Transform package',
author='Pearu Peterson',
author_email = '[email protected]',
maintainer_email = '[email protected]',
license = 'SciPy License (BSD Style)',
**configuration(top_path='').todict())
|
gpl-3.0
|
greg17477/kernel_franco_mako
|
Documentation/networking/cxacru-cf.py
|
14668
|
1626
|
#!/usr/bin/env python
# Copyright 2009 Simon Arlott
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Usage: cxacru-cf.py < cxacru-cf.bin
# Output: values string suitable for the sysfs adsl_config attribute
#
# Warning: cxacru-cf.bin with MD5 hash cdbac2689969d5ed5d4850f117702110
# contains mis-aligned values which will stop the modem from being able
# to make a connection. If the first and last two bytes are removed then
# the values become valid, but the modulation will be forced to ANSI
# T1.413 only which may not be appropriate.
#
# The original binary format is a packed list of le32 values.
import sys
import struct
i = 0
while True:
buf = sys.stdin.read(4)
if len(buf) == 0:
break
elif len(buf) != 4:
sys.stdout.write("\n")
sys.stderr.write("Error: read {0} not 4 bytes\n".format(len(buf)))
sys.exit(1)
if i > 0:
sys.stdout.write(" ")
sys.stdout.write("{0:x}={1}".format(i, struct.unpack("<I", buf)[0]))
i += 1
sys.stdout.write("\n")
|
gpl-2.0
|
tic-ull/ufc
|
ufc.py
|
1
|
12807
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This script does accounting of all the mail that postfix moves.
#
# With this information the aim is to be able to control the flow of outgoing
# messages from postfix.
#
#
# TODO
# - Implement a needs_root decorator that checks whether the user running
# the script is root (required to run postsuper).
#
# FIXME
# - Even when a ban is released, whether the user has exceeded the configured
# limit is still checked. There is no way to bypass this limit.
#
import daemon
import datetime
import sys
import os
import ConfigParser
import socket
from optparse import OptionParser
from cStringIO import StringIO
import traceback
from sqlalchemy import create_engine, exc, or_
from sqlalchemy.orm import sessionmaker
from models import metadata, Ban, Log
from utils import fatal_error, sendMail, str_to_list
import postfix
import logging
log = logging.getLogger('ufc')
log.setLevel(logging.INFO)
from logging.handlers import SysLogHandler
syslog = SysLogHandler('/dev/log', facility = SysLogHandler.LOG_MAIL)
syslog.setFormatter(logging.Formatter('%(name)s: %(levelname)s %(message)s'))
log.addHandler(syslog)
config_path = '/etc/ufc.cfg'
mail_tpl = """
El usuario %s ha pretendido enviar en %s segundos más de %s mensajes.
La actividad se ha detectado en la máquina %s.
"""
class UFC():
"ULL Flow Control"
    # Only take into account mail whose sender belongs to our domain
    only_from_domain = True
    # List of users this filter is not applied to
    whitelist = []
    # Response messages
ans = 'El usuario ha superado el limite de correos enviados por minuto'
hold = "HOLD %s" % ans
reject = "REJECT %s" % ans
dunno = "DUNNO"
    # Mail configuration
smtp_server = 'localhost'
recipients = ['root', ]
fqdn = socket.getfqdn()
tls_required = True
def __init__(self, verbose = False, listen_tcp = True):
if verbose:
log.setLevel(logging.DEBUG)
self.listen_tcp = listen_tcp
self.configure()
def configure(self):
config = ConfigParser.ConfigParser()
try:
config.read(config_path)
except IOError:
log.error('Error reading configuration from %s' % config_path)
return False
log.info("Reading configuration from %s" % config_path)
self.smtp_server = self.get_config(config, 'smtp', 'server', self.smtp_server)
self.recipients = self.get_config(config, 'smtp', 'recipients', self.recipients)
if type(self.recipients) == str:
self.recipients = str_to_list(self.recipients)
# Limits configuration
self.max_time = int(self.get_config(config, 'limits', 'max_time'))
self.max_email = int(self.get_config(config, 'limits', 'max_email'))
self.domain = self.get_config(config, 'limits', 'domain')
if self.domain is None:
try:
self.domain = self.fqdn.split('.', 1)[1]
except IndexError:
self.domain = 'localdomain'
log.info("Domain: %s " % self.domain)
self.whitelist = self.get_config(config, 'limits', 'whitelist', self.whitelist)
if type(self.whitelist) == str:
self.whitelist = str_to_list(self.whitelist)
log.info("Whitelist: %s " % self.whitelist)
connection_string = self.get_config(config, 'database', 'connection_string')
try:
connection = create_engine(connection_string, pool_size=25, max_overflow=25, pool_recycle=30)
except Exception, e:
log.error('Database access error, connection string used: %s. Error: %s' % (connection_string, e))
return False
metadata.create_all(connection, checkfirst=True)
self.session = sessionmaker(bind=connection)
return True
def get_config(self, config, section, option, default = None):
log.debug("Reading [%s]:%s from config" % (section, option))
try:
value = config.get(section, option)
log.debug("Found value: %s" % value)
return value
except ConfigParser.NoSectionError, e:
if default is not None:
log.debug('Not found, using default: %s' % default)
return default
fatal_error('Error: %s' % e)
except ConfigParser.NoOptionError, e:
if default is not None:
log.debug('Not found, using default: %s' % default)
return default
fatal_error('Error: %s' % e)
def dbinsert(self, DBObj):
ses = self.session()
ses.add(DBObj)
try:
ses.commit()
ses.close()
except exc.ResourceClosedError:
            # If the DB connection drops we hit this exception; nothing to do because
            # it is retried with another session from the pool
pass
except exc.InvalidRequestError:
            # For some DB connection errors, rollback must be called explicitly
            # before the DB can be written to again
ses.rollback()
except exc.TimeoutError:
            # When the pool limit plus max_overflow is reached it stalls and times out.
            # In theory it retries after 30 seconds, so we simply pass
log.warning("Sin conexiones en el pool! La operacion sobre la BD no se pudo realizar.")
pass
ses.close()
def get_sender(self, req):
if req['sasl_username']:
if req['sasl_username'].find('@') == -1:
sender = "%s@%s" % (req['sasl_username'].lower(), self.domain)
else:
sender = req['sasl_username'].lower()
log.debug("Using sasl_username as sender: %s" % sender)
return sender
return req['sender'].lower()
def append_to_log(self, request):
if request['size'] is not None:
request['size'] = int(request['size'])
request['request_time'] = datetime.datetime.now()
request['expiresAt'] = request['request_time'] + datetime.timedelta(weeks=1)
request['real_sender'] = self.get_sender(request)
logentry = Log(request)
self.dbinsert(logentry)
def is_banned(self, user, ses):
bans = ses.query(Ban).filter(Ban.sender == user).\
filter(or_(Ban.expires_at == None, Ban.expires_at > datetime.datetime.now()))
if not bans.count():
return False
return bans
def ban_user(self, user):
ban = Ban(sender = user, created = datetime.datetime.now(), host = self.fqdn)
self.dbinsert(ban)
sendMail(
'Bloqueado el envío de correo del usuario %s' % user,
mail_tpl % (user, self.max_time, self.max_email, self.fqdn),
self.smtp_server,
self.tls_required,
self.recipients,
user
)
def unban_user(self, user):
ses = self.session()
bans = self.is_banned(user, ses)
if not bans:
msg = "The user %s doesn't have bans to release" % user
print msg
log.warning(msg)
else:
msg = "Releasing %s bans for user %s" % (bans.count(), user)
print msg
log.info(msg)
now = datetime.datetime.now()
for b in bans:
log.debug("Setting expire time to %s to the ban created at %s in %s" % (now, b.created, b.host))
b.expires_at = now
ses.commit()
ses.close()
def release_mail(self, user):
"""
Unhold mail from the user
"""
msgs = postfix.mailq(sender = user, queue = postfix.HOLD_QUEUE)
postfix.release_mail(msgs.keys())
def remove_mail(self, user):
"""
Remove mail from the user
"""
msgs = postfix.mailq(sender = user, queue = postfix.HOLD_QUEUE)
postfix.remove_mail(msgs.keys())
def check_limits(self, request):
"""
        With the information postfix sends us, held in request, we have to
        decide what to do.
        The actions we can order are any that can appear in an access map:
        http://www.postfix.org/access.5.html
"""
sender = request['real_sender']
log.info("[%s] - %s - %s => %s" % (
request['request_time'], request['client_address'], \
sender, request['recipient']))
if sender in self.whitelist:
log.debug("%s address is in whitelist, ignoring", sender)
return self.dunno
if self.only_from_domain and \
self.domain and \
not sender.endswith(self.domain):
            # Not from our domain
log.debug("%s address is not from domain %s, ignoring", sender, self.domain)
return self.dunno
ses = self.session()
if self.is_banned(sender, ses):
log.debug("Intento de envío de correo de un usuario baneado: %s" % sender)
ses.close()
return self.hold
time = request['request_time'] - datetime.timedelta(seconds = self.max_time)
sent_emails = ses.query(Log).filter(Log.real_sender == sender).\
filter(Log.request_time > time).count()
if sent_emails < self.max_email:
ses.close()
return self.dunno
log.info("Bloqueando correo del usuario %s por enviar %d correos en menos de %d segundos" % \
(sender, sent_emails, self.max_time))
self.ban_user(sender)
ses.close()
return self.hold
def check(self, lines):
        # Convert the input into a dictionary
req = dict([line.split('=', 1) for line in lines if line])
        # Keep only the fields defined in the Log model
req = dict([(k, req[k]) for k in req.keys() if hasattr(Log, k)])
self.append_to_log(req)
return self.check_limits(req)
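    # Illustrative only: check() consumes "key=value" lines in the Postfix
    # policy-delegation format, terminated by an empty line; a request could
    # roughly look like the (made-up) lines below.
    EXAMPLE_POLICY_REQUEST = [
        'request=smtpd_access_policy',
        'sender=someone@example.org',
        'recipient=dest@example.com',
        'client_address=192.0.2.10',
        'sasl_username=someone',
        'size=1543',
    ]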
def purge(self):
log.info("Expirando entradas antiguas en el Log")
ses = self.session()
ses.query(Log).filter(Log.expires_at < datetime.datetime.now()).delete(synchronize_session='fetch')
ses.close()
return True
def read_line_from_stdin(self):
line = ''
for c in sys.stdin.readline():
if c == '\n':
break
line += c
return line.strip()
def read_lines_from_stdin(self):
lines = []
while True:
line = self.read_line_from_stdin()
log.debug("Stdin: %s" % line)
if line == "":
break
lines.append(line)
return lines
def process(self):
if self.listen_tcp:
import server
server.start(self)
else:
lines = self.read_lines_from_stdin()
print "action=%s\n" % self.check(lines)
def main(options):
ufc = UFC(options.verbose, not options.stdin)
if options.purge:
ufc.purge()
elif options.unban_email:
ufc.unban_user(options.unban_email)
if options.release:
ufc.release_mail(options.unban_email)
elif options.remove:
ufc.remove_mail(options.unban_email)
else:
ufc.process()
if __name__ == "__main__":
try:
parser = OptionParser()
parser.add_option("-p", "--purge", dest="purge", action="store_true", help="Purge log database", default=False)
parser.add_option("-v", "--verbose", dest="verbose", action="store_true", help="Verbose", default=False)
parser.add_option("-d", "--daemon", dest="daemon", action="store_true", help="Daemonize", default=False)
parser.add_option("-s", "--stdin", dest="stdin", action="store_true", help="Read input from STDIN (no listen TCP)", default=False)
parser.add_option("--unban", dest="unban_email", help="Unban email (implies --no-daemon)")
parser.add_option("--release", dest="release", action="store_true", help="Release mail from hold sent by unbanned user", default=False)
parser.add_option("--remove", dest="remove", action="store_true", help="Remove mail from hold sent by unbanned user", default=False)
options, args = parser.parse_args()
if options.daemon:
with daemon.DaemonContext():
main(options)
else:
main(options)
except Exception, e:
fp = StringIO()
traceback.print_exc(file=fp)
log.error('Excepcion no controlada del tipo %s: %s' % (type(e), e))
log.error('%s' % fp.getvalue())
fatal_error()
|
gpl-3.0
|
areski/django
|
django/templatetags/cache.py
|
471
|
3389
|
from __future__ import unicode_literals
from django.core.cache import InvalidCacheBackendError, caches
from django.core.cache.utils import make_template_fragment_key
from django.template import (
Library, Node, TemplateSyntaxError, VariableDoesNotExist,
)
register = Library()
class CacheNode(Node):
def __init__(self, nodelist, expire_time_var, fragment_name, vary_on, cache_name):
self.nodelist = nodelist
self.expire_time_var = expire_time_var
self.fragment_name = fragment_name
self.vary_on = vary_on
self.cache_name = cache_name
def render(self, context):
try:
expire_time = self.expire_time_var.resolve(context)
except VariableDoesNotExist:
raise TemplateSyntaxError('"cache" tag got an unknown variable: %r' % self.expire_time_var.var)
try:
expire_time = int(expire_time)
except (ValueError, TypeError):
raise TemplateSyntaxError('"cache" tag got a non-integer timeout value: %r' % expire_time)
if self.cache_name:
try:
cache_name = self.cache_name.resolve(context)
except VariableDoesNotExist:
raise TemplateSyntaxError('"cache" tag got an unknown variable: %r' % self.cache_name.var)
try:
fragment_cache = caches[cache_name]
except InvalidCacheBackendError:
raise TemplateSyntaxError('Invalid cache name specified for cache tag: %r' % cache_name)
else:
try:
fragment_cache = caches['template_fragments']
except InvalidCacheBackendError:
fragment_cache = caches['default']
vary_on = [var.resolve(context) for var in self.vary_on]
cache_key = make_template_fragment_key(self.fragment_name, vary_on)
value = fragment_cache.get(cache_key)
if value is None:
value = self.nodelist.render(context)
fragment_cache.set(cache_key, value, expire_time)
return value
@register.tag('cache')
def do_cache(parser, token):
"""
This will cache the contents of a template fragment for a given amount
of time.
Usage::
{% load cache %}
{% cache [expire_time] [fragment_name] %}
.. some expensive processing ..
{% endcache %}
This tag also supports varying by a list of arguments::
{% load cache %}
{% cache [expire_time] [fragment_name] [var1] [var2] .. %}
.. some expensive processing ..
{% endcache %}
Optionally the cache to use may be specified thus::
{% cache .... using="cachename" %}
Each unique set of arguments will result in a unique cache entry.
"""
nodelist = parser.parse(('endcache',))
parser.delete_first_token()
tokens = token.split_contents()
if len(tokens) < 3:
raise TemplateSyntaxError("'%r' tag requires at least 2 arguments." % tokens[0])
if len(tokens) > 3 and tokens[-1].startswith('using='):
cache_name = parser.compile_filter(tokens[-1][len('using='):])
tokens = tokens[:-1]
else:
cache_name = None
return CacheNode(nodelist,
parser.compile_filter(tokens[1]),
tokens[2], # fragment_name can't be a variable.
[parser.compile_filter(t) for t in tokens[3:]],
cache_name,
)
|
bsd-3-clause
|
civisanalytics/ansible
|
test/units/modules/network/eos/test_eos_banner.py
|
15
|
2312
|
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from ansible.compat.tests.mock import patch
from ansible.modules.network.eos import eos_banner
from .eos_module import TestEosModule, load_fixture, set_module_args
class TestEosBannerModule(TestEosModule):
module = eos_banner
def setUp(self):
self.mock_run_commands = patch('ansible.modules.network.eos.eos_banner.run_commands')
self.run_commands = self.mock_run_commands.start()
self.mock_load_config = patch('ansible.modules.network.eos.eos_banner.load_config')
self.load_config = self.mock_load_config.start()
def tearDown(self):
self.mock_run_commands.stop()
self.mock_load_config.stop()
def load_fixtures(self, commands=None):
self.run_commands.return_value = load_fixture('eos_banner_show_banner.txt').strip()
self.load_config.return_value = dict(diff=None, session='session')
def test_eos_banner_create(self):
set_module_args(dict(banner='login', text='test\nbanner\nstring'))
commands = ['banner login', 'test', 'banner', 'string', 'EOF']
self.execute_module(changed=True, commands=commands)
def test_eos_banner_remove(self):
set_module_args(dict(banner='login', state='absent'))
commands = ['no banner login']
self.execute_module(changed=True, commands=commands)
def test_eos_banner_nochange(self):
banner_text = load_fixture('eos_banner_show_banner.txt').strip()
set_module_args(dict(banner='login', text=banner_text))
self.execute_module()
|
gpl-3.0
|
mhrivnak/pulp
|
server/test/unit/server/webservices/views/test_search.py
|
1
|
11706
|
"""
This module contains tests for the pulp.server.webservices.views.search module.
"""
import unittest
import mock
from django import http
from base import assert_auth_READ
from pulp.server import exceptions
from pulp.server.webservices.views import search
class TestSearchView(unittest.TestCase):
"""
Test the SearchView class.
"""
@mock.patch('pulp.server.webservices.controllers.decorators._verify_auth',
new=assert_auth_READ())
@mock.patch('pulp.server.webservices.views.search.criteria.Criteria.from_client_input')
def test_get_with_fields(self, from_client_input):
"""
Test the GET search handler with fields in the request.
"""
class FakeSearchView(search.SearchView):
model = mock.MagicMock()
request = mock.MagicMock()
# Simulate an empty POST body
request.GET = {'field': ['name', 'id'], 'filters': ['{"name":"admin"}']}
view = FakeSearchView()
FakeSearchView.model.objects.find_by_criteria.return_value = ['big money', 'bigger money']
with mock.patch.object(FakeSearchView, '_generate_response',
side_effect=FakeSearchView._generate_response) as _generate_response:
results = view.get(request)
self.assertEqual(type(results), http.HttpResponse)
self.assertEqual(results.content, '["big money", "bigger money"]')
self.assertEqual(results.status_code, 200)
# This is actually a bug, but the intention of this Django port was to behave exactly like
        # the webpy handlers did, bugs included. When #312 is fixed, the tests below should fail,
# because the get() handler should have deserialized the filters instead of leaving them as
# strings. Please modify these assertions to have the correct behavior.
# https://pulp.plan.io/issues/312
_generate_response.assert_called_once_with(
{'fields': ['name', 'id'], 'filters': ['{"name":"admin"}']})
from_client_input.assert_called_once_with(
{'fields': ['name', 'id'], 'filters': ['{"name":"admin"}']})
@mock.patch('pulp.server.webservices.controllers.decorators._verify_auth',
new=assert_auth_READ())
@mock.patch('pulp.server.webservices.views.search.criteria.Criteria.from_client_input')
def test_get_without_fields(self, from_client_input):
"""
Test the GET search handler without any fields specified in the request.
"""
class FakeSearchView(search.SearchView):
model = mock.MagicMock()
request = mock.MagicMock()
        # Simulate GET parameters carrying only filters
request.GET = {'filters': ['{"name":"admin"}']}
view = FakeSearchView()
FakeSearchView.model.objects.find_by_criteria.return_value = ['big money', 'bigger money']
with mock.patch.object(FakeSearchView, '_generate_response',
side_effect=FakeSearchView._generate_response) as _generate_response:
results = view.get(request)
self.assertEqual(type(results), http.HttpResponse)
self.assertEqual(results.content, '["big money", "bigger money"]')
self.assertEqual(results.status_code, 200)
# This is actually a bug, but the intention of this Django port was to behave exactly like
        # the webpy handlers did, bugs included. When #312 is fixed, the tests below should fail,
# because the get() handler should have deserialized the filters instead of leaving them as
# strings. Please modify these assertions to have the correct behavior.
# https://pulp.plan.io/issues/312
_generate_response.assert_called_once_with({'filters': ['{"name":"admin"}']})
from_client_input.assert_called_once_with({'filters': ['{"name":"admin"}']})
@mock.patch('pulp.server.webservices.controllers.decorators._verify_auth',
new=assert_auth_READ())
def test_post(self):
"""
Test the POST search under normal conditions.
"""
class FakeSearchView(search.SearchView):
model = mock.MagicMock()
request = mock.MagicMock()
        # Simulate a POST body containing search criteria
request.body = '{"criteria": {"filters": {"money": {"$gt": 1000000}}}}'
view = FakeSearchView()
FakeSearchView.model.objects.find_by_criteria.return_value = ['big money', 'bigger money']
with mock.patch.object(FakeSearchView, '_generate_response',
side_effect=FakeSearchView._generate_response) as _generate_response:
results = view.post(request)
self.assertEqual(type(results), http.HttpResponse)
self.assertEqual(results.content, '["big money", "bigger money"]')
self.assertEqual(results.status_code, 200)
_generate_response.assert_called_once_with({'filters': {'money': {'$gt': 1000000}}})
@mock.patch('pulp.server.webservices.controllers.decorators._verify_auth',
new=assert_auth_READ())
def test_post_missing_criteria(self):
"""
        Test the POST search when the user has not passed the required criteria.
"""
request = mock.MagicMock()
# Simulate an empty POST body
request.body = "{}"
view = search.SearchView()
try:
view.post(request)
self.fail('A MissingValue Exception should have been raised.')
except exceptions.MissingValue, e:
self.assertEqual(e.property_names, ['criteria'])
def test__generate_response_no_fields(self):
"""
Test that _generate_response() works correctly when the query does not contain fields.
"""
class FakeSearchView(search.SearchView):
model = mock.MagicMock()
query = {'filters': {'money': {'$gt': 1000000}}}
FakeSearchView.model.objects.find_by_criteria.return_value = ['big money', 'bigger money']
results = FakeSearchView._generate_response(query)
self.assertEqual(type(results), http.HttpResponse)
self.assertEqual(results.content, '["big money", "bigger money"]')
self.assertEqual(results.status_code, 200)
self.assertEqual(
FakeSearchView.model.objects.find_by_criteria.mock_calls[0][1][0]['fields'], None)
self.assertEqual(
FakeSearchView.model.objects.find_by_criteria.mock_calls[0][1][0]['filters'],
{'money': {'$gt': 1000000}})
def test__generate_response_with_custom_response_builder(self):
"""
Test the _generate_response() method for the case where the SearchView is configured to
use a custom response_builder function.
"""
class FakeSearchView(search.SearchView):
response_builder = mock.MagicMock(return_value=42)
model = mock.MagicMock()
query = {'filters': {'money': {'$gt': 1000000}}}
FakeSearchView.model.objects.find_by_criteria.return_value = ['big money', 'bigger money']
results = FakeSearchView._generate_response(query)
self.assertEqual(results, 42)
self.assertEqual(
FakeSearchView.model.objects.find_by_criteria.mock_calls[0][1][0]['fields'], None)
self.assertEqual(
FakeSearchView.model.objects.find_by_criteria.mock_calls[0][1][0]['filters'],
{'money': {'$gt': 1000000}})
FakeSearchView.response_builder.assert_called_once_with(['big money', 'bigger money'])
def test__generate_response_with_dumb_model(self):
"""
Test the _generate_response() method for the case where the SearchView is configured to
search an old-style model.
"""
class FakeSearchView(search.SearchView):
manager = mock.MagicMock()
query = {'filters': {'money': {'$gt': 1000000}}}
FakeSearchView.manager.find_by_criteria.return_value = ['big money', 'bigger money']
results = FakeSearchView._generate_response(query)
self.assertEqual(type(results), http.HttpResponse)
self.assertEqual(results.content, '["big money", "bigger money"]')
self.assertEqual(results.status_code, 200)
self.assertEqual(
FakeSearchView.manager.find_by_criteria.mock_calls[0][1][0]['fields'], None)
self.assertEqual(
FakeSearchView.manager.find_by_criteria.mock_calls[0][1][0]['filters'],
{'money': {'$gt': 1000000}})
def test__generate_response_with_fields_with_id(self):
"""
Test that _generate_response() works correctly when the query contains fields that include
the id.
"""
class FakeSearchView(search.SearchView):
model = mock.MagicMock()
query = {'filters': {'money': {'$gt': 1000000}}, 'fields': ['cash', 'id']}
FakeSearchView.model.objects.find_by_criteria.return_value = ['big money', 'bigger money']
results = FakeSearchView._generate_response(query)
self.assertEqual(type(results), http.HttpResponse)
self.assertEqual(results.content, '["big money", "bigger money"]')
self.assertEqual(results.status_code, 200)
self.assertEqual(
FakeSearchView.model.objects.find_by_criteria.mock_calls[0][1][0]['fields'],
['cash', 'id'])
self.assertEqual(
FakeSearchView.model.objects.find_by_criteria.mock_calls[0][1][0]['filters'],
{'money': {'$gt': 1000000}})
def test__generate_response_with_fields_without_id(self):
"""
Test that _generate_response() works correctly when the query contains fields that do not
include the id.
"""
class FakeSearchView(search.SearchView):
model = mock.MagicMock()
query = {'filters': {'money': {'$gt': 1000000}}, 'fields': ['cash']}
FakeSearchView.model.objects.find_by_criteria.return_value = ['big money', 'bigger money']
results = FakeSearchView._generate_response(query)
self.assertEqual(type(results), http.HttpResponse)
self.assertEqual(results.content, '["big money", "bigger money"]')
self.assertEqual(results.status_code, 200)
self.assertEqual(
FakeSearchView.model.objects.find_by_criteria.mock_calls[0][1][0]['fields'],
['cash', 'id'])
self.assertEqual(
FakeSearchView.model.objects.find_by_criteria.mock_calls[0][1][0]['filters'],
{'money': {'$gt': 1000000}})
def test__generate_response_with_serializer(self):
"""
Test the _generate_response() method for the case where the SearchView is configured to
use a serializer.
"""
class FakeSearchView(search.SearchView):
model = mock.MagicMock()
serializer = mock.MagicMock(side_effect=['biggest money', 'unreal money'])
query = {'filters': {'money': {'$gt': 1000000}}}
FakeSearchView.model.objects.find_by_criteria.return_value = ['big money', 'bigger money']
results = FakeSearchView._generate_response(query)
self.assertEqual(type(results), http.HttpResponse)
self.assertEqual(results.content, '["biggest money", "unreal money"]')
self.assertEqual(results.status_code, 200)
self.assertEqual(
FakeSearchView.model.objects.find_by_criteria.mock_calls[0][1][0]['fields'], None)
self.assertEqual(
FakeSearchView.model.objects.find_by_criteria.mock_calls[0][1][0]['filters'],
{'money': {'$gt': 1000000}})
self.assertEqual([c[1][0] for c in FakeSearchView.serializer.mock_calls],
['big money', 'bigger money'])
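# -----------------------------------------------------------------------------
# Hedged sketch (editor's addition, not part of Pulp itself). Judging only from
# the FakeSearchView subclasses exercised above, a concrete view appears to need
# little more than a 'model' attribute whose objects.find_by_criteria() returns
# the documents to serialize; 'serializer' and 'response_builder' look like
# optional hooks. The MagicMock model below is a hypothetical stand-in, not a
# real Pulp model.
class ExampleSearchView(search.SearchView):
    model = mock.MagicMock()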
|
gpl-2.0
|
MinimalOS/external_skia
|
platform_tools/android/bin/download_utils.py
|
149
|
8464
|
#!/usr/bin/python
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A library to assist automatically downloading files.
This library is used by scripts that download tarballs, zipfiles, etc. as part
of the build process.
"""
import hashlib
import http_download
import os.path
import re
import shutil
import sys
import time
import urllib2
SOURCE_STAMP = 'SOURCE_URL'
HASH_STAMP = 'SOURCE_SHA1'
# Designed to handle more general inputs than sys.platform because the platform
# name may come from the command line.
PLATFORM_COLLAPSE = {
'windows': 'windows',
'win32': 'windows',
'cygwin': 'windows',
'linux': 'linux',
'linux2': 'linux',
'linux3': 'linux',
'darwin': 'mac',
'mac': 'mac',
}
ARCH_COLLAPSE = {
'i386' : 'x86',
'i686' : 'x86',
'x86_64': 'x86',
'armv7l': 'arm',
}
class HashError(Exception):
def __init__(self, download_url, expected_hash, actual_hash):
self.download_url = download_url
self.expected_hash = expected_hash
self.actual_hash = actual_hash
def __str__(self):
return 'Got hash "%s" but expected hash "%s" for "%s"' % (
self.actual_hash, self.expected_hash, self.download_url)
def PlatformName(name=None):
if name is None:
name = sys.platform
return PLATFORM_COLLAPSE[name]
def ArchName(name=None):
if name is None:
if PlatformName() == 'windows':
# TODO(pdox): Figure out how to auto-detect 32-bit vs 64-bit Windows.
name = 'i386'
else:
import platform
name = platform.machine()
return ARCH_COLLAPSE[name]
def EnsureFileCanBeWritten(filename):
directory = os.path.dirname(filename)
if not os.path.exists(directory):
os.makedirs(directory)
def WriteData(filename, data):
EnsureFileCanBeWritten(filename)
f = open(filename, 'wb')
f.write(data)
f.close()
def WriteDataFromStream(filename, stream, chunk_size, verbose=True):
EnsureFileCanBeWritten(filename)
dst = open(filename, 'wb')
try:
while True:
data = stream.read(chunk_size)
if len(data) == 0:
break
dst.write(data)
if verbose:
# Indicate that we're still writing.
sys.stdout.write('.')
sys.stdout.flush()
finally:
if verbose:
sys.stdout.write('\n')
dst.close()
def DoesStampMatch(stampfile, expected, index):
try:
f = open(stampfile, 'r')
stamp = f.read()
f.close()
if stamp.split('\n')[index] == expected:
return "already up-to-date."
elif stamp.startswith('manual'):
return "manual override."
return False
except IOError:
return False
def WriteStamp(stampfile, data):
EnsureFileCanBeWritten(stampfile)
f = open(stampfile, 'w')
f.write(data)
f.close()
def StampIsCurrent(path, stamp_name, stamp_contents, min_time=None, index=0):
stampfile = os.path.join(path, stamp_name)
# Check if the stampfile is older than the minimum last mod time
if min_time:
try:
stamp_time = os.stat(stampfile).st_mtime
if stamp_time <= min_time:
return False
except OSError:
return False
return DoesStampMatch(stampfile, stamp_contents, index)
def WriteSourceStamp(path, url):
stampfile = os.path.join(path, SOURCE_STAMP)
WriteStamp(stampfile, url)
def WriteHashStamp(path, hash_val):
hash_stampfile = os.path.join(path, HASH_STAMP)
WriteStamp(hash_stampfile, hash_val)
def Retry(op, *args):
# Windows seems to be prone to having commands that delete files or
# directories fail. We currently do not have a complete understanding why,
# and as a workaround we simply retry the command a few times.
# It appears that file locks are hanging around longer than they should. This
# may be a secondary effect of processes hanging around longer than they
# should. This may be because when we kill a browser sel_ldr does not exit
# immediately, etc.
  # Virus checkers can also accidentally prevent files from being deleted, but
# that shouldn't be a problem on the bots.
if sys.platform in ('win32', 'cygwin'):
count = 0
while True:
try:
op(*args)
break
except Exception:
sys.stdout.write("FAILED: %s %s\n" % (op.__name__, repr(args)))
count += 1
if count < 5:
sys.stdout.write("RETRY: %s %s\n" % (op.__name__, repr(args)))
time.sleep(pow(2, count))
else:
# Don't mask the exception.
raise
else:
op(*args)
def MoveDirCleanly(src, dst):
RemoveDir(dst)
MoveDir(src, dst)
def MoveDir(src, dst):
Retry(shutil.move, src, dst)
def RemoveDir(path):
if os.path.exists(path):
Retry(shutil.rmtree, path)
def RemoveFile(path):
if os.path.exists(path):
Retry(os.unlink, path)
def _HashFileHandle(fh):
"""sha1 of a file like object.
Arguments:
fh: file handle like object to hash.
Returns:
sha1 as a string.
"""
hasher = hashlib.sha1()
try:
while True:
data = fh.read(4096)
if not data:
break
hasher.update(data)
finally:
fh.close()
return hasher.hexdigest()
def HashFile(filename):
"""sha1 a file on disk.
Arguments:
filename: filename to hash.
Returns:
sha1 as a string.
"""
fh = open(filename, 'rb')
return _HashFileHandle(fh)
def HashUrlByDownloading(url):
"""sha1 the data at an url.
Arguments:
url: url to download from.
Returns:
sha1 of the data at the url.
"""
try:
fh = urllib2.urlopen(url)
except:
sys.stderr.write("Failed fetching URL: %s\n" % url)
raise
return _HashFileHandle(fh)
# Attempts to get the SHA1 hash of a file given a URL by looking for
# an adjacent file with a ".sha1hash" suffix. This saves having to
# download a large tarball just to get its hash. Otherwise, we fall
# back to downloading the main file.
def HashUrl(url):
hash_url = '%s.sha1hash' % url
try:
fh = urllib2.urlopen(hash_url)
data = fh.read(100)
fh.close()
except urllib2.HTTPError, exn:
if exn.code == 404:
return HashUrlByDownloading(url)
raise
else:
if not re.match('[0-9a-f]{40}\n?$', data):
raise AssertionError('Bad SHA1 hash file: %r' % data)
return data.strip()
def SyncURL(url, filename=None, stamp_dir=None, min_time=None,
hash_val=None, keep=False, verbose=False, stamp_index=0):
"""Synchronize a destination file with a URL
if the URL does not match the URL stamp, then we must re-download it.
  Arguments:
    url: the url to compare against and download from
    filename: the file to create on download
    stamp_dir: the directory containing the URL stamp file to check against
hash_val: if set, the expected hash which must be matched
verbose: prints out status as it runs
stamp_index: index within the stamp file to check.
Returns:
True if the file is replaced
False if the file is not replaced
Exception:
HashError: if the hash does not match
"""
assert url and filename
# If we are not keeping the tarball, or we already have it, we can
# skip downloading it for this reason. If we are keeping it,
# it must exist.
if keep:
tarball_ok = os.path.isfile(filename)
else:
tarball_ok = True
# If we don't need the tarball and the stamp_file matches the url, then
# we must be up to date. If the URL differs but the recorded hash matches
# the one we'll insist the tarball has, then that's good enough too.
# TODO(mcgrathr): Download the .sha1sum file first to compare with
# the cached hash, in case --file-hash options weren't used.
if tarball_ok and stamp_dir is not None:
if StampIsCurrent(stamp_dir, SOURCE_STAMP, url, min_time):
if verbose:
print '%s is already up to date.' % filename
return False
if (hash_val is not None and
StampIsCurrent(stamp_dir, HASH_STAMP, hash_val, min_time, stamp_index)):
if verbose:
print '%s is identical to the up to date file.' % filename
return False
if verbose:
print 'Updating %s\n\tfrom %s.' % (filename, url)
EnsureFileCanBeWritten(filename)
http_download.HttpDownload(url, filename)
if hash_val:
tar_hash = HashFile(filename)
if hash_val != tar_hash:
raise HashError(actual_hash=tar_hash, expected_hash=hash_val,
download_url=url)
return True
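# ------------------------------------------------------------------------------
# Hedged usage sketch (editor's addition, not part of the original library).
# It shows one way a build script might drive SyncURL based on the docstring
# above; the URL, local paths and stamp directory are hypothetical placeholders
# rather than real project resources.
def _ExampleSyncUrl():
  url = 'http://example.com/toolchain.tar.gz'   # hypothetical URL
  filename = 'out/downloads/toolchain.tar.gz'   # hypothetical local path
  stamp_dir = 'out/toolchain'                   # hypothetical stamp directory
  # Download only when the recorded SOURCE_URL stamp no longer matches the URL.
  replaced = SyncURL(url, filename=filename, stamp_dir=stamp_dir,
                     keep=True, verbose=True)
  if replaced:
    # Record the URL so the next call can short-circuit.
    WriteSourceStamp(stamp_dir, url)
  return replaced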
|
bsd-3-clause
|
mfherbst/spack
|
lib/spack/spack/test/packaging.py
|
4
|
14935
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
"""
This test checks the binary packaging infrastructure
"""
import os
import stat
import sys
import shutil
import pytest
import argparse
from llnl.util.filesystem import mkdirp
import spack.repo
import spack.store
import spack.config
import spack.mirror
import spack.stage
import spack.util.gpg
import spack.binary_distribution as bindist
import spack.cmd.buildcache as buildcache
from spack.spec import Spec
from spack.paths import mock_gpg_keys_path
from spack.fetch_strategy import URLFetchStrategy, FetchStrategyComposite
from spack.util.executable import ProcessError
from spack.relocate import needs_binary_relocation, needs_text_relocation
from spack.relocate import strings_contains_installroot
from spack.relocate import get_patchelf, relocate_text
from spack.relocate import substitute_rpath, get_relative_rpaths
from spack.relocate import macho_replace_paths, macho_make_paths_relative
from spack.relocate import modify_macho_object, macho_get_paths
@pytest.fixture(scope='function')
def testing_gpg_directory(tmpdir):
old_gpg_path = spack.util.gpg.GNUPGHOME
spack.util.gpg.GNUPGHOME = str(tmpdir.join('gpg'))
yield
spack.util.gpg.GNUPGHOME = old_gpg_path
def has_gnupg2():
try:
spack.util.gpg.Gpg.gpg()('--version', output=os.devnull)
return True
except ProcessError:
return False
def fake_fetchify(url, pkg):
"""Fake the URL for a package so it downloads from a file."""
fetcher = FetchStrategyComposite()
fetcher.append(URLFetchStrategy(url))
pkg.fetcher = fetcher
@pytest.mark.usefixtures('install_mockery', 'testing_gpg_directory')
def test_buildcache(mock_archive, tmpdir):
# tweak patchelf to only do a download
spec = Spec("patchelf")
spec.concretize()
pkg = spack.repo.get(spec)
fake_fetchify(pkg.fetcher, pkg)
mkdirp(os.path.join(pkg.prefix, "bin"))
patchelfscr = os.path.join(pkg.prefix, "bin", "patchelf")
f = open(patchelfscr, 'w')
body = """#!/bin/bash
echo $PATH"""
f.write(body)
f.close()
st = os.stat(patchelfscr)
os.chmod(patchelfscr, st.st_mode | stat.S_IEXEC)
# Install the test package
spec = Spec('trivial-install-test-package')
spec.concretize()
assert spec.concrete
pkg = spec.package
fake_fetchify(mock_archive.url, pkg)
pkg.do_install()
pkghash = '/' + spec.dag_hash(7)
# Put some non-relocatable file in there
filename = os.path.join(spec.prefix, "dummy.txt")
with open(filename, "w") as script:
script.write(spec.prefix)
# Create the build cache and
# put it directly into the mirror
mirror_path = os.path.join(str(tmpdir), 'test-mirror')
spack.mirror.create(
mirror_path, specs=[], no_checksum=True
)
# register mirror with spack config
mirrors = {'spack-mirror-test': 'file://' + mirror_path}
spack.config.set('mirrors', mirrors)
stage = spack.stage.Stage(
mirrors['spack-mirror-test'], name="build_cache", keep=True)
stage.create()
# setup argument parser
parser = argparse.ArgumentParser()
buildcache.setup_parser(parser)
# Create a private key to sign package with if gpg2 available
if has_gnupg2():
spack.util.gpg.Gpg.create(name='test key 1', expires='0',
email='[email protected]',
comment='Spack test key')
# Create build cache with signing
args = parser.parse_args(['create', '-d', mirror_path, str(spec)])
buildcache.buildcache(parser, args)
# Uninstall the package
pkg.do_uninstall(force=True)
# test overwrite install
args = parser.parse_args(['install', '-f', str(pkghash)])
buildcache.buildcache(parser, args)
# create build cache with relative path and signing
args = parser.parse_args(
['create', '-d', mirror_path, '-f', '-r', str(spec)])
buildcache.buildcache(parser, args)
# Uninstall the package
pkg.do_uninstall(force=True)
# install build cache with verification
args = parser.parse_args(['install', str(spec)])
buildcache.install_tarball(spec, args)
# test overwrite install
args = parser.parse_args(['install', '-f', str(pkghash)])
buildcache.buildcache(parser, args)
else:
# create build cache without signing
args = parser.parse_args(
['create', '-d', mirror_path, '-u', str(spec)])
buildcache.buildcache(parser, args)
# Uninstall the package
pkg.do_uninstall(force=True)
# install build cache without verification
args = parser.parse_args(['install', '-u', str(spec)])
buildcache.install_tarball(spec, args)
# test overwrite install without verification
args = parser.parse_args(['install', '-f', '-u', str(pkghash)])
buildcache.buildcache(parser, args)
# create build cache with relative path
args = parser.parse_args(
['create', '-d', mirror_path, '-f', '-r', '-u', str(pkghash)])
buildcache.buildcache(parser, args)
# Uninstall the package
pkg.do_uninstall(force=True)
# install build cache
args = parser.parse_args(['install', '-u', str(spec)])
buildcache.install_tarball(spec, args)
# test overwrite install
args = parser.parse_args(['install', '-f', '-u', str(pkghash)])
buildcache.buildcache(parser, args)
# Validate the relocation information
buildinfo = bindist.read_buildinfo_file(spec.prefix)
assert(buildinfo['relocate_textfiles'] == ['dummy.txt'])
args = parser.parse_args(['list'])
buildcache.buildcache(parser, args)
args = parser.parse_args(['list', '-f'])
buildcache.buildcache(parser, args)
args = parser.parse_args(['list', 'trivial'])
buildcache.buildcache(parser, args)
# Copy a key to the mirror to have something to download
shutil.copyfile(mock_gpg_keys_path + '/external.key',
mirror_path + '/external.key')
args = parser.parse_args(['keys'])
buildcache.buildcache(parser, args)
args = parser.parse_args(['keys', '-f'])
buildcache.buildcache(parser, args)
# unregister mirror with spack config
mirrors = {}
spack.config.set('mirrors', mirrors)
shutil.rmtree(mirror_path)
stage.destroy()
def test_relocate_text(tmpdir):
with tmpdir.as_cwd():
# Validate the text path replacement
old_dir = '/home/spack/opt/spack'
filename = 'dummy.txt'
with open(filename, "w") as script:
script.write(old_dir)
script.close()
filenames = [filename]
new_dir = '/opt/rh/devtoolset/'
relocate_text(filenames, old_dir, new_dir)
        with open(filename, "r") as script:
for line in script:
assert(new_dir in line)
assert(strings_contains_installroot(filename, old_dir) is False)
def test_needs_relocation():
binary_type = (
'ELF 64-bit LSB executable, x86-64, version 1 (SYSV),'
' dynamically linked (uses shared libs),'
' for GNU/Linux x.y.z, stripped')
assert needs_binary_relocation(binary_type, os_id='Linux')
assert not needs_binary_relocation('relocatable',
os_id='Linux')
assert not needs_binary_relocation('symbolic link to `foo\'',
os_id='Linux')
assert needs_text_relocation('ASCII text')
assert not needs_text_relocation('symbolic link to `foo.text\'')
macho_type = 'Mach-O 64-bit executable x86_64'
assert needs_binary_relocation(macho_type, os_id='Darwin')
def test_macho_paths():
out = macho_make_paths_relative('/Users/Shares/spack/pkgC/lib/libC.dylib',
'/Users/Shared/spack',
('/Users/Shared/spack/pkgA/lib',
'/Users/Shared/spack/pkgB/lib',
'/usr/local/lib'),
('/Users/Shared/spack/pkgA/libA.dylib',
'/Users/Shared/spack/pkgB/libB.dylib',
'/usr/local/lib/libloco.dylib'),
'/Users/Shared/spack/pkgC/lib/libC.dylib')
assert out == (['@loader_path/../../../../Shared/spack/pkgA/lib',
'@loader_path/../../../../Shared/spack/pkgB/lib',
'/usr/local/lib'],
['@loader_path/../../../../Shared/spack/pkgA/libA.dylib',
'@loader_path/../../../../Shared/spack/pkgB/libB.dylib',
'/usr/local/lib/libloco.dylib'],
'@rpath/libC.dylib')
out = macho_make_paths_relative('/Users/Shared/spack/pkgC/bin/exeC',
'/Users/Shared/spack',
('/Users/Shared/spack/pkgA/lib',
'/Users/Shared/spack/pkgB/lib',
'/usr/local/lib'),
('/Users/Shared/spack/pkgA/libA.dylib',
'/Users/Shared/spack/pkgB/libB.dylib',
'/usr/local/lib/libloco.dylib'), None)
assert out == (['@loader_path/../../pkgA/lib',
'@loader_path/../../pkgB/lib',
'/usr/local/lib'],
['@loader_path/../../pkgA/libA.dylib',
'@loader_path/../../pkgB/libB.dylib',
'/usr/local/lib/libloco.dylib'], None)
out = macho_replace_paths('/Users/Shared/spack',
'/Applications/spack',
('/Users/Shared/spack/pkgA/lib',
'/Users/Shared/spack/pkgB/lib',
'/usr/local/lib'),
('/Users/Shared/spack/pkgA/libA.dylib',
'/Users/Shared/spack/pkgB/libB.dylib',
'/usr/local/lib/libloco.dylib'),
'/Users/Shared/spack/pkgC/lib/libC.dylib')
assert out == (['/Applications/spack/pkgA/lib',
'/Applications/spack/pkgB/lib',
'/usr/local/lib'],
['/Applications/spack/pkgA/libA.dylib',
'/Applications/spack/pkgB/libB.dylib',
'/usr/local/lib/libloco.dylib'],
'/Applications/spack/pkgC/lib/libC.dylib')
out = macho_replace_paths('/Users/Shared/spack',
'/Applications/spack',
('/Users/Shared/spack/pkgA/lib',
'/Users/Shared/spack/pkgB/lib',
'/usr/local/lib'),
('/Users/Shared/spack/pkgA/libA.dylib',
'/Users/Shared/spack/pkgB/libB.dylib',
'/usr/local/lib/libloco.dylib'),
None)
assert out == (['/Applications/spack/pkgA/lib',
'/Applications/spack/pkgB/lib',
'/usr/local/lib'],
['/Applications/spack/pkgA/libA.dylib',
'/Applications/spack/pkgB/libB.dylib',
'/usr/local/lib/libloco.dylib'],
None)
def test_elf_paths():
out = get_relative_rpaths(
'/usr/bin/test', '/usr',
('/usr/lib', '/usr/lib64', '/opt/local/lib'))
assert out == ['$ORIGIN/../lib', '$ORIGIN/../lib64', '/opt/local/lib']
out = substitute_rpath(
('/usr/lib', '/usr/lib64', '/opt/local/lib'), '/usr', '/opt')
assert out == ['/opt/lib', '/opt/lib64', '/opt/local/lib']
@pytest.mark.skipif(sys.platform != 'darwin',
reason="only works with Mach-o objects")
def test_relocate_macho(tmpdir):
with tmpdir.as_cwd():
get_patchelf() # this does nothing on Darwin
rpaths, deps, idpath = macho_get_paths('/bin/bash')
nrpaths, ndeps, nid = macho_make_paths_relative('/bin/bash', '/usr',
rpaths, deps, idpath)
shutil.copyfile('/bin/bash', 'bash')
modify_macho_object('bash',
rpaths, deps, idpath,
nrpaths, ndeps, nid)
rpaths, deps, idpath = macho_get_paths('/bin/bash')
nrpaths, ndeps, nid = macho_replace_paths('/usr', '/opt',
rpaths, deps, idpath)
shutil.copyfile('/bin/bash', 'bash')
modify_macho_object('bash',
rpaths, deps, idpath,
nrpaths, ndeps, nid)
path = '/usr/lib/libncurses.5.4.dylib'
rpaths, deps, idpath = macho_get_paths(path)
nrpaths, ndeps, nid = macho_make_paths_relative(path, '/usr',
rpaths, deps, idpath)
shutil.copyfile(
'/usr/lib/libncurses.5.4.dylib', 'libncurses.5.4.dylib')
modify_macho_object('libncurses.5.4.dylib',
rpaths, deps, idpath,
nrpaths, ndeps, nid)
rpaths, deps, idpath = macho_get_paths(path)
nrpaths, ndeps, nid = macho_replace_paths('/usr', '/opt',
rpaths, deps, idpath)
shutil.copyfile(
'/usr/lib/libncurses.5.4.dylib', 'libncurses.5.4.dylib')
modify_macho_object(
'libncurses.5.4.dylib',
rpaths, deps, idpath,
nrpaths, ndeps, nid)
|
lgpl-2.1
|
teeple/pns_server
|
work/install/Python-2.7.4/Lib/bsddb/test/test_dbobj.py
|
114
|
2407
|
import os, string
import unittest
from test_all import db, dbobj, test_support, get_new_environment_path, \
get_new_database_path
#----------------------------------------------------------------------
class dbobjTestCase(unittest.TestCase):
"""Verify that dbobj.DB and dbobj.DBEnv work properly"""
db_name = 'test-dbobj.db'
def setUp(self):
self.homeDir = get_new_environment_path()
def tearDown(self):
if hasattr(self, 'db'):
del self.db
if hasattr(self, 'env'):
del self.env
test_support.rmtree(self.homeDir)
def test01_both(self):
class TestDBEnv(dbobj.DBEnv): pass
class TestDB(dbobj.DB):
def put(self, key, *args, **kwargs):
key = key.upper()
# call our parent classes put method with an upper case key
return dbobj.DB.put(self, key, *args, **kwargs)
self.env = TestDBEnv()
self.env.open(self.homeDir, db.DB_CREATE | db.DB_INIT_MPOOL)
self.db = TestDB(self.env)
self.db.open(self.db_name, db.DB_HASH, db.DB_CREATE)
self.db.put('spam', 'eggs')
self.assertEqual(self.db.get('spam'), None,
"overridden dbobj.DB.put() method failed [1]")
self.assertEqual(self.db.get('SPAM'), 'eggs',
"overridden dbobj.DB.put() method failed [2]")
self.db.close()
self.env.close()
def test02_dbobj_dict_interface(self):
self.env = dbobj.DBEnv()
self.env.open(self.homeDir, db.DB_CREATE | db.DB_INIT_MPOOL)
self.db = dbobj.DB(self.env)
self.db.open(self.db_name+'02', db.DB_HASH, db.DB_CREATE)
# __setitem__
self.db['spam'] = 'eggs'
# __len__
self.assertEqual(len(self.db), 1)
# __getitem__
self.assertEqual(self.db['spam'], 'eggs')
# __del__
del self.db['spam']
self.assertEqual(self.db.get('spam'), None, "dbobj __del__ failed")
self.db.close()
self.env.close()
def test03_dbobj_type_before_open(self):
# Ensure this doesn't cause a segfault.
self.assertRaises(db.DBInvalidArgError, db.DB().type)
#----------------------------------------------------------------------
def test_suite():
return unittest.makeSuite(dbobjTestCase)
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
|
gpl-2.0
|
fajoy/nova
|
nova/scheduler/weights/least_cost.py
|
3
|
4693
|
# Copyright (c) 2011-2012 OpenStack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Least Cost is an algorithm for choosing which host machines to
provision a set of resources to. The input is a WeightedHost object which
is decided upon by a set of objective-functions, called the 'cost-functions'.
The WeightedHost contains a combined weight for each cost-function.
The cost-function and weights are tabulated, and the host with the least cost
is then selected for provisioning.
NOTE(comstud): This is deprecated. One should use the RAMWeigher and/or
create other weight modules.
"""
from nova import exception
from nova.openstack.common import cfg
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
least_cost_opts = [
cfg.ListOpt('least_cost_functions',
default=None,
help='Which cost functions the LeastCostScheduler should use'),
cfg.FloatOpt('noop_cost_fn_weight',
default=1.0,
help='How much weight to give the noop cost function'),
cfg.FloatOpt('compute_fill_first_cost_fn_weight',
default=None,
help='How much weight to give the fill-first cost function. '
'A negative value will reverse behavior: '
'e.g. spread-first'),
]
CONF = cfg.CONF
CONF.register_opts(least_cost_opts)
def noop_cost_fn(host_state, weight_properties):
"""Return a pre-weight cost of 1 for each host"""
return 1
def compute_fill_first_cost_fn(host_state, weight_properties):
"""Higher weights win, so we should return a lower weight
when there's more free ram available.
Note: the weight modifier for this function in default configuration
is -1.0. With -1.0 this function runs in reverse, so systems
with the most free memory will be preferred.
"""
return -host_state.free_ram_mb
def _get_cost_functions():
"""Returns a list of tuples containing weights and cost functions to
use for weighing hosts
"""
cost_fns_conf = CONF.least_cost_functions
if cost_fns_conf is None:
# The old default. This will get fixed up below.
fn_str = 'nova.scheduler.least_cost.compute_fill_first_cost_fn'
cost_fns_conf = [fn_str]
cost_fns = []
for cost_fn_str in cost_fns_conf:
short_name = cost_fn_str.split('.')[-1]
if not (short_name.startswith('compute_') or
short_name.startswith('noop')):
continue
# Fix up any old paths to the new paths
if cost_fn_str.startswith('nova.scheduler.least_cost.'):
cost_fn_str = ('nova.scheduler.weights.least_cost' +
cost_fn_str[25:])
try:
# NOTE: import_class is somewhat misnamed since
# the weighing function can be any non-class callable
# (i.e., no 'self')
cost_fn = importutils.import_class(cost_fn_str)
except ImportError:
raise exception.SchedulerCostFunctionNotFound(
cost_fn_str=cost_fn_str)
try:
flag_name = "%s_weight" % cost_fn.__name__
weight = getattr(CONF, flag_name)
except AttributeError:
raise exception.SchedulerWeightFlagNotFound(
flag_name=flag_name)
# Set the original default.
if (flag_name == 'compute_fill_first_cost_fn_weight' and
weight is None):
weight = -1.0
cost_fns.append((weight, cost_fn))
return cost_fns
def get_least_cost_weighers():
cost_functions = _get_cost_functions()
# Unfortunately we need to import this late so we don't have an
# import loop.
from nova.scheduler import weights
class _LeastCostWeigher(weights.BaseHostWeigher):
def weigh_objects(self, weighted_hosts, weight_properties):
for host in weighted_hosts:
host.weight = sum(weight * fn(host.obj, weight_properties)
for weight, fn in cost_functions)
return [_LeastCostWeigher]
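# -----------------------------------------------------------------------------
# Hedged illustration (editor's addition, not part of the original module). It
# replays, outside of nova, how the weigher above tabulates its cost functions:
# a host's weight is the sum of weight * cost_fn(host_state, props) over every
# configured (weight, cost_fn) pair. The hosts and numbers below are made up.
def _example_tabulation():
    class FakeHostState(object):
        def __init__(self, free_ram_mb):
            self.free_ram_mb = free_ram_mb

    cost_functions = [
        (1.0, noop_cost_fn),                 # flat cost of 1 per host
        (-1.0, compute_fill_first_cost_fn),  # negative weight => spread-first
    ]
    hosts = {'host1': FakeHostState(2048), 'host2': FakeHostState(512)}
    weights = {}
    for name, host_state in hosts.items():
        weights[name] = sum(weight * fn(host_state, {})
                            for weight, fn in cost_functions)
    # host1: 1*1 + (-1.0)*(-2048) = 2049.0, host2: 1*1 + (-1.0)*(-512) = 513.0,
    # so host1 (the host with the most free RAM) wins when higher weights win.
    return weights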
|
apache-2.0
|
HousekeepLtd/django
|
django/template/smartif.py
|
275
|
6643
|
"""
Parser and utilities for the smart 'if' tag
"""
import warnings
from django.utils.deprecation import RemovedInDjango110Warning
# Using a simple top down parser, as described here:
# http://effbot.org/zone/simple-top-down-parsing.htm.
# 'led' = left denotation
# 'nud' = null denotation
# 'bp' = binding power (left = lbp, right = rbp)
class TokenBase(object):
"""
Base class for operators and literals, mainly for debugging and for throwing
syntax errors.
"""
id = None # node/token type name
value = None # used by literals
first = second = None # used by tree nodes
def nud(self, parser):
# Null denotation - called in prefix context
raise parser.error_class(
"Not expecting '%s' in this position in if tag." % self.id
)
def led(self, left, parser):
# Left denotation - called in infix context
raise parser.error_class(
"Not expecting '%s' as infix operator in if tag." % self.id
)
def display(self):
"""
Returns what to display in error messages for this node
"""
return self.id
def __repr__(self):
out = [str(x) for x in [self.id, self.first, self.second] if x is not None]
return "(" + " ".join(out) + ")"
def infix(bp, func):
"""
Creates an infix operator, given a binding power and a function that
evaluates the node
"""
class Operator(TokenBase):
lbp = bp
def led(self, left, parser):
self.first = left
self.second = parser.expression(bp)
return self
def eval(self, context):
try:
return func(context, self.first, self.second)
except Exception:
# Templates shouldn't throw exceptions when rendering. We are
# most likely to get exceptions for things like {% if foo in bar
# %} where 'bar' does not support 'in', so default to False
return False
return Operator
def prefix(bp, func):
"""
Creates a prefix operator, given a binding power and a function that
evaluates the node.
"""
class Operator(TokenBase):
lbp = bp
def nud(self, parser):
self.first = parser.expression(bp)
self.second = None
return self
def eval(self, context):
try:
return func(context, self.first)
except Exception:
return False
return Operator
# Operator precedence follows Python.
# NB - we can get slightly more accurate syntax error messages by not using the
# same object for '==' and '='.
# We defer variable evaluation to the lambda to ensure that terms are
# lazily evaluated using Python's boolean parsing logic.
OPERATORS = {
'or': infix(6, lambda context, x, y: x.eval(context) or y.eval(context)),
'and': infix(7, lambda context, x, y: x.eval(context) and y.eval(context)),
'not': prefix(8, lambda context, x: not x.eval(context)),
'in': infix(9, lambda context, x, y: x.eval(context) in y.eval(context)),
'not in': infix(9, lambda context, x, y: x.eval(context) not in y.eval(context)),
# This should be removed in Django 1.10:
'=': infix(10, lambda context, x, y: x.eval(context) == y.eval(context)),
'==': infix(10, lambda context, x, y: x.eval(context) == y.eval(context)),
'!=': infix(10, lambda context, x, y: x.eval(context) != y.eval(context)),
'>': infix(10, lambda context, x, y: x.eval(context) > y.eval(context)),
'>=': infix(10, lambda context, x, y: x.eval(context) >= y.eval(context)),
'<': infix(10, lambda context, x, y: x.eval(context) < y.eval(context)),
'<=': infix(10, lambda context, x, y: x.eval(context) <= y.eval(context)),
}
# Assign 'id' to each:
for key, op in OPERATORS.items():
op.id = key
class Literal(TokenBase):
"""
A basic self-resolvable object similar to a Django template variable.
"""
# IfParser uses Literal in create_var, but TemplateIfParser overrides
# create_var so that a proper implementation that actually resolves
# variables, filters etc is used.
id = "literal"
lbp = 0
def __init__(self, value):
self.value = value
def display(self):
return repr(self.value)
def nud(self, parser):
return self
def eval(self, context):
return self.value
def __repr__(self):
return "(%s %r)" % (self.id, self.value)
class EndToken(TokenBase):
lbp = 0
def nud(self, parser):
raise parser.error_class("Unexpected end of expression in if tag.")
EndToken = EndToken()
class IfParser(object):
error_class = ValueError
def __init__(self, tokens):
# pre-pass necessary to turn 'not','in' into single token
l = len(tokens)
mapped_tokens = []
i = 0
while i < l:
token = tokens[i]
if token == "not" and i + 1 < l and tokens[i + 1] == "in":
token = "not in"
i += 1 # skip 'in'
mapped_tokens.append(self.translate_token(token))
i += 1
self.tokens = mapped_tokens
self.pos = 0
self.current_token = self.next_token()
def translate_token(self, token):
try:
op = OPERATORS[token]
except (KeyError, TypeError):
return self.create_var(token)
else:
if token == '=':
warnings.warn(
"Operator '=' is deprecated and will be removed in Django 1.10. Use '==' instead.",
RemovedInDjango110Warning, stacklevel=2
)
return op()
def next_token(self):
if self.pos >= len(self.tokens):
return EndToken
else:
retval = self.tokens[self.pos]
self.pos += 1
return retval
def parse(self):
retval = self.expression()
# Check that we have exhausted all the tokens
if self.current_token is not EndToken:
raise self.error_class("Unused '%s' at end of if expression." %
self.current_token.display())
return retval
def expression(self, rbp=0):
t = self.current_token
self.current_token = self.next_token()
left = t.nud(self)
while rbp < self.current_token.lbp:
t = self.current_token
self.current_token = self.next_token()
left = t.led(left, self)
return left
def create_var(self, value):
return Literal(value)
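# A hedged usage sketch (editor's addition, not part of Django itself). It runs
# the bare IfParser on plain Python values, which become Literal nodes via
# create_var(); in real template rendering TemplateIfParser overrides
# create_var() so variables and filters are resolved instead. The token list
# below stands in for "1 == 1 and not False".
def _example_if_parser():
    tokens = [1, '==', 1, 'and', 'not', False]
    parser = IfParser(tokens)
    tree = parser.parse()   # (and (== (literal 1) (literal 1)) (not (literal False)))
    return tree.eval({})    # True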
|
bsd-3-clause
|
alexanderfefelov/nav
|
python/nav/topology/analyze.py
|
1
|
11281
|
#
# Copyright (C) 2011,2012 UNINETT AS
#
# This file is part of Network Administration Visualized (NAV).
#
# NAV is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License version 2 as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details. You should have received a copy of the GNU General Public License
# along with NAV. If not, see <http://www.gnu.org/licenses/>.
#
"""Reduction of network adjacency candidates graph.
This module uses NetworkX to facilitate reduction of NAV's network adjacency
candidates graph (loaded from the adjacency_candidate table) into a proper
physical topology graph.
The adjacency_candidate_netbox table can be loaded as a directed graph, from
which reduction can take place.
The graph can be analyzed and reduced by using the AdjacencyReducer class.
Graph structure
===============
A proper adjacency graph must consist only of Box and Port objects, as defined
in this module.
Netbox nodes must only have outgoing edges to Port nodes representing the
Netbox' own interfaces.
Port nodes can have outgoing edges to other Port nodes, or to Netbox nodes
"""
# pylint: disable=R0903
from itertools import groupby
from operator import attrgetter
import networkx as nx
from nav.models.manage import AdjacencyCandidate
import logging
_logger = logging.getLogger(__name__)
# Data classes
class Box(int):
"""A Netbox' netboxid value"""
name = None
def __str__(self):
return self.name or super(Box, self).__str__()
class Port(tuple):
"""An Interface's (netboxid, interfaceid) values"""
name = None
def __str__(self):
return self.name or super(Port, self).__str__()
# Analyzers
class AdjacencyAnalyzer(object):
"""Adjacency candidate graph analyzer and manipulator"""
def __init__(self, graph):
self.graph = graph
    def get_max_out_degree(self):
        """Returns the highest outgoing degree found among the port nodes"""
ports_and_degree = self.get_ports_and_degree()
maximum = max(ports_and_degree) if ports_and_degree else None
if maximum:
return maximum[0]
else:
return 0
def get_ports_ordered_by_degree(self):
"""Return a list of port nodes from the Graph, ordered by degree."""
ports_and_degree = self.get_ports_and_degree()
ports_and_degree.sort()
return [port for _degree, port in ports_and_degree]
def find_return_path(self, edge):
"""Find a return path starting along edge from a port node.
In the typical network adjacency candidates graph, a return path will
consist of no more than 5 nodes, including the source node at both
ends of the path.
"""
from_port, to_thing = edge
from_netbox = from_port[0]
# Initial known element of the path
path = [from_port]
if type(to_thing) is Box:
# Remote was a netbox, so we need to check each of the netbox'
# outgoing ports to find a path
remote_ports = [e[1] for e in self.graph.edges(to_thing)]
else:
remote_ports = [to_thing]
for remote_port in remote_ports:
for remote_edge in self.graph.edges(remote_port):
remote_thing = remote_edge[1]
if type(remote_thing) is Box and remote_thing == from_netbox:
path.extend([remote_port, remote_thing, from_port])
return path
elif remote_thing == from_port:
path.extend([remote_port, remote_thing])
return path
return []
@staticmethod
def get_distinct_ports(path):
"""Return a distinct list of ports listed in path"""
result = []
for node in path:
if type(node) is Port and node not in result:
result.append(node)
return result
    def connect_ports(self, i, j):
        """Remove existing arcs from i and j and connect them.
        i's outgoing edges are replaced by a single outgoing edge to j.
        j's outgoing edges are replaced by a single outgoing edge to i.
        Once two ports are connected, incoming edges from other ports are no
        longer relevant and are deleted.
        """
self._delete_edges(i)
self._delete_edges(j)
self.delete_incoming_edges_from_ports(i)
self.delete_incoming_edges_from_ports(j)
self.graph.add_edge(i, j)
self.graph.add_edge(j, i)
def _delete_edges(self, node):
"""Deletes all outgoing edges from node"""
# this stupidity is here to support the changing NetworkX APIs
if hasattr(self.graph, 'delete_edges_from'):
self.graph.delete_edges_from(self.graph.edges(node))
else:
self.graph[node].clear()
def delete_incoming_edges_from_ports(self, node):
"""Deletes all edges coming in from ports to node"""
edges_from_ports = [(u, v) for u, v in self.graph.in_edges(node)
if type(u) is Port]
if hasattr(self.graph, 'delete_edges_from'):
self.graph.delete_edges_from(edges_from_ports)
else:
self.graph.remove_edges_from(edges_from_ports)
def get_ports_and_degree(self):
"""Return a list of port nodes and their outgoing degrees.
Result:
A list of tuples: [(degree, node), ... ]
"""
result = [(self.graph.out_degree(n), n)
for n in self.graph.nodes()
if type(n) is Port]
return result
def format_connections(self):
"""Returns a formatted string representation of all outgoing edges
from ports.
"""
output = ["%s => %s" % (source, dest)
for source, dest in self.get_single_edges_from_ports()]
output.sort()
return "\n".join(output)
def get_single_edges_from_ports(self):
"""Returns a list of edges from ports whose degree is 1"""
edges = [self.graph.edges(port)[0]
for port in self.get_ports_by_degree(1)]
return edges
def get_ports_by_degree(self, degree):
"""Returns a list of port nodes with a given out_degree"""
port_nodes = [n for n in self.graph.nodes()
if type(n) is Port and self.graph.out_degree(n) == degree]
return port_nodes
def port_in_degree(self, port):
"""Returns the in_degree of the port node, only counting outgoing
edges from ports, not boxes.
"""
return len([(u, v) for (u, v) in self.graph.in_edges(port)
if type(u) is Port])
def get_incomplete_ports(self):
"""Return a list of port nodes whose outgoing edges have not been
successfully reduced to one.
"""
ports_and_degree = self.get_ports_and_degree()
return [port for degree, port in ports_and_degree
if degree > 1]
def get_boxes_without_ports(self):
"""Return a list of netboxes that have no outgoing edges."""
result = [n for n in self.graph.nodes()
if type(n) is Box and
self.graph.out_degree(n) == 0]
return result
class AdjacencyReducer(AdjacencyAnalyzer):
"""Adjacency candidate graph reducer"""
def reduce(self):
"""Reduces the associated graph.
This will reduce the graph as much as possible. After the graph has
been reduced, any port (tuple) node with an out_degree of 1 should be
ready to store as part of the physical topology.
"""
max_degree = self.get_max_out_degree()
degree = 1
visited = set()
while degree <= max_degree:
unvisited = self._get_unvisited_by_degree(degree, visited)
if len(unvisited) == 0:
degree += 1
continue
self._visit_unvisited(unvisited, visited)
def _get_unvisited_by_degree(self, degree, visited):
ports = set(self.get_ports_by_degree(degree))
return ports.difference(visited)
def _visit_unvisited(self, unvisited, visited):
for port in unvisited:
for source, dest in self.graph.edges(port):
if (self.graph.out_degree(source) == 1 and
type(dest) is Port):
self.connect_ports(source, dest)
visited.add(dest)
else:
path = self.find_return_path((source, dest))
if path:
ports = self.get_distinct_ports(path)
if len(ports) == 2:
i, j = ports
self.connect_ports(i, j)
else:
_logger.warning("A possible self-loop was found: "
"%r", ports)
i, j = (ports[0], ports[0])
visited.update((i, j))
break
visited.add(port)
# Graph builder functions
def build_candidate_graph_from_db():
"""Builds and returns a DiGraph conforming to the requirements of an
AdjacencyAnalyzer, based on data found in the adjacency_candidate database
table.
"""
acs = AdjacencyCandidate.objects.select_related(
'netbox', 'interface', 'to_netbox', 'to_interface')
acs = _filter_by_source(acs)
graph = nx.DiGraph(name="network adjacency candidates")
for cand in acs:
if cand.to_interface:
dest_node = Port((cand.to_netbox.id,
cand.to_interface.id))
dest_node.name = "%s (%s)" % (cand.to_netbox.sysname,
cand.to_interface.ifname)
else:
dest_node = Box(cand.to_netbox.id)
dest_node.name = cand.to_netbox.sysname
port = Port((cand.netbox.id, cand.interface.id))
port.name = "%s (%s)" % (cand.netbox.sysname, cand.interface.ifname)
netbox = Box(cand.netbox.id)
netbox.name = cand.netbox.sysname
graph.add_edge(port, dest_node)
graph.add_edge(netbox, port)
return graph
CDP = 'cdp'
LLDP = 'lldp'
def _filter_by_source(all_candidates):
"""Filters candidates from list based on their source.
For each interface, LLDP is preferred over CDP, CDP is preferred over
anything else.
"""
key = attrgetter('interface.id')
all_candidates = sorted(all_candidates, key=key)
by_ifc = groupby(all_candidates, key)
for _ifc, candidates in by_ifc:
candidates = list(candidates)
sources = set(c.source for c in candidates)
if LLDP in sources:
candidates = (c for c in candidates if c.source == LLDP)
elif CDP in sources:
candidates = (c for c in candidates if c.source == CDP)
for candidate in candidates:
yield candidate
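# ----------------------------------------------------------------------
# Hedged usage sketch (editor's addition, not part of NAV). It hand-builds a
# two-box candidate graph following the structure the module docstring
# requires (Box -> own Port edges, Port -> remote Port edges) and runs the
# reducer over it. The netbox/interface ids are made up, and the sketch
# assumes a NetworkX version this module already supports.
def _example_reduce():
    graph = nx.DiGraph(name="toy adjacency candidates")
    box_a, box_b = Box(1), Box(2)
    port_a, port_b = Port((1, 10)), Port((2, 20))
    # Each netbox points at its own port; each port claims the remote port.
    graph.add_edge(box_a, port_a)
    graph.add_edge(box_b, port_b)
    graph.add_edge(port_a, port_b)
    graph.add_edge(port_b, port_a)
    reducer = AdjacencyReducer(graph)
    reducer.reduce()
    # Every port now has out_degree 1, i.e. the physical link port_a <-> port_b.
    return reducer.get_ports_and_degree()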
|
gpl-2.0
|