repo_name (stringlengths 6-100) | path (stringlengths 4-294) | copies (stringlengths 1-5) | size (stringlengths 4-6) | content (stringlengths 606-896k) | license (stringclasses, 15 values) |
---|---|---|---|---|---|
Jadaw1n/letsencrypt | acme/acme/messages.py | 18 | 13945 | """ACME protocol messages."""
import collections
from acme import challenges
from acme import fields
from acme import jose
from acme import util
class Error(jose.JSONObjectWithFields, Exception):
"""ACME error.
https://tools.ietf.org/html/draft-ietf-appsawg-http-problem-00
:ivar unicode typ:
:ivar unicode title:
:ivar unicode detail:
"""
ERROR_TYPE_NAMESPACE = 'urn:acme:error:'
ERROR_TYPE_DESCRIPTIONS = {
'badCSR': 'The CSR is unacceptable (e.g., due to a short key)',
'badNonce': 'The client sent an unacceptable anti-replay nonce',
'connection': 'The server could not connect to the client for DV',
'dnssec': 'The server could not validate a DNSSEC signed domain',
'malformed': 'The request message was malformed',
'serverInternal': 'The server experienced an internal error',
'tls': 'The server experienced a TLS error during DV',
'unauthorized': 'The client lacks sufficient authorization',
'unknownHost': 'The server could not resolve a domain name',
}
typ = jose.Field('type')
title = jose.Field('title', omitempty=True)
detail = jose.Field('detail')
@typ.encoder
def typ(value): # pylint: disable=missing-docstring,no-self-argument
return Error.ERROR_TYPE_NAMESPACE + value
@typ.decoder
def typ(value): # pylint: disable=missing-docstring,no-self-argument
# pylint thinks isinstance(value, Error), so startswith is not found
# pylint: disable=no-member
if not value.startswith(Error.ERROR_TYPE_NAMESPACE):
raise jose.DeserializationError('Missing error type prefix')
without_prefix = value[len(Error.ERROR_TYPE_NAMESPACE):]
if without_prefix not in Error.ERROR_TYPE_DESCRIPTIONS:
raise jose.DeserializationError('Error type not recognized')
return without_prefix
@property
def description(self):
"""Hardcoded error description based on its type.
:rtype: unicode
"""
return self.ERROR_TYPE_DESCRIPTIONS[self.typ]
def __str__(self):
if self.typ is not None:
return ' :: '.join([self.typ, self.description, self.detail])
else:
return str(self.detail)
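# Usage sketch (illustrative, not part of the original module; the exact
# constructor behavior depends on the jose helpers above):
#     err = Error(typ='malformed', title=None, detail='Request malformed')
#     err.to_partial_json()['type']   # -> 'urn:acme:error:malformed'
#     Error.from_json({'type': 'urn:acme:error:malformed',
#                      'detail': 'Request malformed'}).description
#     # -> 'The request message was malformed'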
class _Constant(jose.JSONDeSerializable, collections.Hashable):
"""ACME constant."""
__slots__ = ('name',)
POSSIBLE_NAMES = NotImplemented
def __init__(self, name):
self.POSSIBLE_NAMES[name] = self
self.name = name
def to_partial_json(self):
return self.name
@classmethod
def from_json(cls, value):
if value not in cls.POSSIBLE_NAMES:
raise jose.DeserializationError(
'{0} not recognized'.format(cls.__name__))
return cls.POSSIBLE_NAMES[value]
def __repr__(self):
return '{0}({1})'.format(self.__class__.__name__, self.name)
def __eq__(self, other):
return isinstance(other, type(self)) and other.name == self.name
def __hash__(self):
return hash((self.__class__, self.name))
def __ne__(self, other):
return not self == other
class Status(_Constant):
"""ACME "status" field."""
POSSIBLE_NAMES = {}
STATUS_UNKNOWN = Status('unknown')
STATUS_PENDING = Status('pending')
STATUS_PROCESSING = Status('processing')
STATUS_VALID = Status('valid')
STATUS_INVALID = Status('invalid')
STATUS_REVOKED = Status('revoked')
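# Usage sketch (illustrative): constants serialize to their plain names and
# deserialize back to the same singleton instance:
#     STATUS_PENDING.to_partial_json()                # -> 'pending'
#     Status.from_json('pending') is STATUS_PENDING   # -> True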
class IdentifierType(_Constant):
"""ACME identifier type."""
POSSIBLE_NAMES = {}
IDENTIFIER_FQDN = IdentifierType('dns') # IdentifierDNS in Boulder
class Identifier(jose.JSONObjectWithFields):
"""ACME identifier.
:ivar IdentifierType typ:
:ivar unicode value:
"""
typ = jose.Field('type', decoder=IdentifierType.from_json)
value = jose.Field('value')
class Directory(jose.JSONDeSerializable):
"""Directory."""
_REGISTERED_TYPES = {}
@classmethod
def _canon_key(cls, key):
return getattr(key, 'resource_type', key)
@classmethod
def register(cls, resource_body_cls):
"""Register resource."""
assert resource_body_cls.resource_type not in cls._REGISTERED_TYPES
cls._REGISTERED_TYPES[resource_body_cls.resource_type] = resource_body_cls
return resource_body_cls
def __init__(self, jobj):
canon_jobj = util.map_keys(jobj, self._canon_key)
if not set(canon_jobj).issubset(self._REGISTERED_TYPES):
# TODO: acme-spec is not clear about this: 'It is a JSON
# dictionary, whose keys are the "resource" values listed
            # in {{https-requests}}'
raise ValueError('Wrong directory fields')
# TODO: check that everything is an absolute URL; acme-spec is
# not clear on that
self._jobj = canon_jobj
def __getattr__(self, name):
try:
return self[name.replace('_', '-')]
except KeyError as error:
raise AttributeError(str(error))
def __getitem__(self, name):
try:
return self._jobj[self._canon_key(name)]
except KeyError:
raise KeyError('Directory field not found')
def to_partial_json(self):
return self._jobj
@classmethod
def from_json(cls, jobj):
try:
return cls(jobj)
except ValueError as error:
raise jose.DeserializationError(str(error))
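# Usage sketch (illustrative; the URL is made up): once the resource classes
# below are registered, directory entries can be read by key, by attribute
# (underscores map to dashes), or by registered class:
#     d = Directory({'new-reg': 'https://example.com/acme/new-reg'})
#     d['new-reg'] == d.new_reg == d[NewRegistration]   # -> True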
class Resource(jose.JSONObjectWithFields):
"""ACME Resource.
:ivar acme.messages.ResourceBody body: Resource body.
"""
body = jose.Field('body')
class ResourceWithURI(Resource):
"""ACME Resource with URI.
:ivar unicode uri: Location of the resource.
"""
uri = jose.Field('uri') # no ChallengeResource.uri
class ResourceBody(jose.JSONObjectWithFields):
"""ACME Resource Body."""
class Registration(ResourceBody):
"""Registration Resource Body.
:ivar acme.jose.jwk.JWK key: Public key.
:ivar tuple contact: Contact information following ACME spec,
`tuple` of `unicode`.
:ivar unicode agreement:
:ivar unicode authorizations: URI where
`messages.Registration.Authorizations` can be found.
:ivar unicode certificates: URI where
`messages.Registration.Certificates` can be found.
"""
# on new-reg key server ignores 'key' and populates it based on
# JWS.signature.combined.jwk
key = jose.Field('key', omitempty=True, decoder=jose.JWK.from_json)
contact = jose.Field('contact', omitempty=True, default=())
agreement = jose.Field('agreement', omitempty=True)
authorizations = jose.Field('authorizations', omitempty=True)
certificates = jose.Field('certificates', omitempty=True)
class Authorizations(jose.JSONObjectWithFields):
"""Authorizations granted to Account in the process of registration.
:ivar tuple authorizations: URIs to Authorization Resources.
"""
authorizations = jose.Field('authorizations')
class Certificates(jose.JSONObjectWithFields):
"""Certificates granted to Account in the process of registration.
:ivar tuple certificates: URIs to Certificate Resources.
"""
certificates = jose.Field('certificates')
phone_prefix = 'tel:'
email_prefix = 'mailto:'
@classmethod
def from_data(cls, phone=None, email=None, **kwargs):
"""Create registration resource from contact details."""
details = list(kwargs.pop('contact', ()))
if phone is not None:
details.append(cls.phone_prefix + phone)
if email is not None:
details.append(cls.email_prefix + email)
kwargs['contact'] = tuple(details)
return cls(**kwargs)
def _filter_contact(self, prefix):
return tuple(
detail[len(prefix):] for detail in self.contact
if detail.startswith(prefix))
@property
def phones(self):
"""All phones found in the ``contact`` field."""
return self._filter_contact(self.phone_prefix)
@property
def emails(self):
"""All emails found in the ``contact`` field."""
return self._filter_contact(self.email_prefix)
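# Usage sketch (illustrative contact details): from_data prepends the URI
# prefixes, and the phones/emails properties strip them back off:
#     reg = Registration.from_data(phone='12345', email='a@example.com')
#     reg.contact   # -> ('tel:12345', 'mailto:a@example.com')
#     reg.phones    # -> ('12345',)
#     reg.emails    # -> ('a@example.com',)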
@Directory.register
class NewRegistration(Registration):
"""New registration."""
resource_type = 'new-reg'
resource = fields.Resource(resource_type)
class UpdateRegistration(Registration):
"""Update registration."""
resource_type = 'reg'
resource = fields.Resource(resource_type)
class RegistrationResource(ResourceWithURI):
"""Registration Resource.
:ivar acme.messages.Registration body:
:ivar unicode new_authzr_uri: URI found in the 'next' ``Link`` header
:ivar unicode terms_of_service: URL for the CA TOS.
"""
body = jose.Field('body', decoder=Registration.from_json)
new_authzr_uri = jose.Field('new_authzr_uri')
terms_of_service = jose.Field('terms_of_service', omitempty=True)
class ChallengeBody(ResourceBody):
"""Challenge Resource Body.
.. todo::
Confusingly, this has a similar name to `.challenges.Challenge`,
as well as `.achallenges.AnnotatedChallenge`. Please use names
such as ``challb`` to distinguish instances of this class from
``achall``.
    :ivar acme.challenges.Challenge chall: Wrapped challenge.
Conveniently, all challenge fields are proxied, i.e. you can
call ``challb.x`` to get ``challb.chall.x`` contents.
:ivar acme.messages.Status status:
:ivar datetime.datetime validated:
:ivar messages.Error error:
"""
__slots__ = ('chall',)
uri = jose.Field('uri')
status = jose.Field('status', decoder=Status.from_json,
omitempty=True, default=STATUS_PENDING)
validated = fields.RFC3339Field('validated', omitempty=True)
error = jose.Field('error', decoder=Error.from_json,
omitempty=True, default=None)
def to_partial_json(self):
jobj = super(ChallengeBody, self).to_partial_json()
jobj.update(self.chall.to_partial_json())
return jobj
@classmethod
def fields_from_json(cls, jobj):
jobj_fields = super(ChallengeBody, cls).fields_from_json(jobj)
jobj_fields['chall'] = challenges.Challenge.from_json(jobj)
return jobj_fields
def __getattr__(self, name):
return getattr(self.chall, name)
class ChallengeResource(Resource):
"""Challenge Resource.
:ivar acme.messages.ChallengeBody body:
:ivar unicode authzr_uri: URI found in the 'up' ``Link`` header.
"""
body = jose.Field('body', decoder=ChallengeBody.from_json)
authzr_uri = jose.Field('authzr_uri')
@property
def uri(self): # pylint: disable=missing-docstring,no-self-argument
# bug? 'method already defined line None'
# pylint: disable=function-redefined
return self.body.uri # pylint: disable=no-member
class Authorization(ResourceBody):
"""Authorization Resource Body.
:ivar acme.messages.Identifier identifier:
:ivar list challenges: `list` of `.ChallengeBody`
:ivar tuple combinations: Challenge combinations (`tuple` of `tuple`
of `int`, as opposed to `list` of `list` from the spec).
:ivar acme.messages.Status status:
:ivar datetime.datetime expires:
"""
identifier = jose.Field('identifier', decoder=Identifier.from_json)
challenges = jose.Field('challenges', omitempty=True)
combinations = jose.Field('combinations', omitempty=True)
status = jose.Field('status', omitempty=True, decoder=Status.from_json)
# TODO: 'expires' is allowed for Authorization Resources in
# general, but for Key Authorization '[t]he "expires" field MUST
# be absent'... then acme-spec gives example with 'expires'
# present... That's confusing!
expires = fields.RFC3339Field('expires', omitempty=True)
@challenges.decoder
def challenges(value): # pylint: disable=missing-docstring,no-self-argument
return tuple(ChallengeBody.from_json(chall) for chall in value)
@property
def resolved_combinations(self):
"""Combinations with challenges instead of indices."""
return tuple(tuple(self.challenges[idx] for idx in combo)
for combo in self.combinations)
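# Resolution sketch (illustrative): with three challenge bodies and
# combinations ((0,), (1, 2)), resolved_combinations returns
# ((challenges[0],), (challenges[1], challenges[2])), i.e. the index tuples
# with the indices replaced by the ChallengeBody objects themselves.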
@Directory.register
class NewAuthorization(Authorization):
"""New authorization."""
resource_type = 'new-authz'
resource = fields.Resource(resource_type)
class AuthorizationResource(ResourceWithURI):
"""Authorization Resource.
:ivar acme.messages.Authorization body:
:ivar unicode new_cert_uri: URI found in the 'next' ``Link`` header
"""
body = jose.Field('body', decoder=Authorization.from_json)
new_cert_uri = jose.Field('new_cert_uri')
@Directory.register
class CertificateRequest(jose.JSONObjectWithFields):
"""ACME new-cert request.
:ivar acme.jose.util.ComparableX509 csr:
`OpenSSL.crypto.X509Req` wrapped in `.ComparableX509`
"""
resource_type = 'new-cert'
resource = fields.Resource(resource_type)
csr = jose.Field('csr', decoder=jose.decode_csr, encoder=jose.encode_csr)
class CertificateResource(ResourceWithURI):
"""Certificate Resource.
:ivar acme.jose.util.ComparableX509 body:
`OpenSSL.crypto.X509` wrapped in `.ComparableX509`
:ivar unicode cert_chain_uri: URI found in the 'up' ``Link`` header
:ivar tuple authzrs: `tuple` of `AuthorizationResource`.
"""
cert_chain_uri = jose.Field('cert_chain_uri')
authzrs = jose.Field('authzrs')
@Directory.register
class Revocation(jose.JSONObjectWithFields):
"""Revocation message.
:ivar .ComparableX509 certificate: `OpenSSL.crypto.X509` wrapped in
`.ComparableX509`
"""
resource_type = 'revoke-cert'
resource = fields.Resource(resource_type)
certificate = jose.Field(
'certificate', decoder=jose.decode_cert, encoder=jose.encode_cert)
| apache-2.0 |
ephes/scikit-learn | examples/decomposition/plot_pca_vs_lda.py | 182 | 1743 | """
=======================================================
Comparison of LDA and PCA 2D projection of Iris dataset
=======================================================
The Iris dataset represents 3 kinds of Iris flowers (Setosa, Versicolour
and Virginica) with 4 attributes: sepal length, sepal width, petal length
and petal width.
Principal Component Analysis (PCA) applied to this data identifies the
combination of attributes (principal components, or directions in the
feature space) that account for the most variance in the data. Here we
plot the different samples on the first two principal components.
Linear Discriminant Analysis (LDA) tries to identify attributes that
account for the most variance *between classes*. In particular,
LDA, in contrast to PCA, is a supervised method, using known class labels.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.lda import LDA
iris = datasets.load_iris()
X = iris.data
y = iris.target
target_names = iris.target_names
pca = PCA(n_components=2)
X_r = pca.fit(X).transform(X)
lda = LDA(n_components=2)
X_r2 = lda.fit(X, y).transform(X)
# Percentage of variance explained by each component
print('explained variance ratio (first two components): %s'
% str(pca.explained_variance_ratio_))
plt.figure()
for c, i, target_name in zip("rgb", [0, 1, 2], target_names):
plt.scatter(X_r[y == i, 0], X_r[y == i, 1], c=c, label=target_name)
plt.legend()
plt.title('PCA of IRIS dataset')
plt.figure()
for c, i, target_name in zip("rgb", [0, 1, 2], target_names):
plt.scatter(X_r2[y == i, 0], X_r2[y == i, 1], c=c, label=target_name)
plt.legend()
plt.title('LDA of IRIS dataset')
plt.show()
| bsd-3-clause |
elfi-dev/elfi | elfi/examples/daycare.py | 2 | 10847 | """Example of inference of transmission dynamics of bacteria in day care centers.
Treatment roughly follows:
Numminen, E., Cheng, L., Gyllenberg, M. and Corander, J.: Estimating the transmission dynamics
of Streptococcus pneumoniae from strain prevalence data, Biometrics, 69, 748-757, 2013.
"""
import logging
from functools import partial
import numpy as np
import elfi
def daycare(t1, t2, t3, n_dcc=29, n_ind=53, n_strains=33, freq_strains_commun=None,
n_obs=36, time_end=10., batch_size=1, random_state=None):
r"""Generate cross-sectional data from a stochastic variant of the SIS-model.
This function simulates the transmission dynamics of bacterial infections in daycare centers
    (DCC) as described in Numminen et al. [2013]. The observation model is, however, simplified to
an equal number of sampled individuals among the daycare centers.
The model is defined as a continuous-time Markov process with transition probabilities:
Pr(I_{is}(t+dt)=1 | I_{is}(t)=0) = t1 * E_s(I(t)) + t2 * P_s, if \sum_{j=1}^N_s I_{ij}(t)=0
Pr(I_{is}(t+dt)=1 | I_{is}(t)=0) = t3 * (t1 * E_s(I(t)) + t2 * P_s), otherwise
Pr(I_{is}(t+dt)=0 | I_{is}(t)=1) = \gamma
where:
I_{is}(t) is the status of carriage of strain s for individual i.
E_s(I(t)) is the probability of sampling the strain s
t1 is the rate of transmission from other children at the DCC (\beta in paper).
t2 is the rate of transmission from the community outside the DCC (\Lambda in paper).
t3 scales the rate of an infected child being infected with another strain (\theta in paper).
\gamma is the relative probability of healing from a strain.
As in the paper, \gamma=1, and the other inferred parameters are relative to it.
The system is solved using the Direct method [Gillespie, 1977].
References
----------
Numminen, E., Cheng, L., Gyllenberg, M. and Corander, J. (2013) Estimating the transmission
dynamics of Streptococcus pneumoniae from strain prevalence data, Biometrics, 69, 748-757.
Gillespie, D. T. (1977) Exact stochastic simulation of coupled chemical reactions.
The Journal of Physical Chemistry 81 (25), 2340–2361.
Parameters
----------
t1 : float or np.array
Rate of transmission from other individuals at the DCC.
t2 : float or np.array
Rate of transmission from the community outside the DCC.
t3 : float or np.array
Scaling of co-infection for individuals infected with another strain.
n_dcc : int, optional
Number of daycare centers.
n_ind : int, optional
Number of individuals in a DCC (same for all).
n_strains : int, optional
Number of bacterial strains considered.
freq_strains_commun : np.array of shape (n_strains,), optional
Prevalence of each strain in the community outside the DCC. Defaults to 0.1.
n_obs : int, optional
Number of individuals sampled from each DCC (same for all).
time_end : float, optional
        The system is solved using the Direct method until all DCCs in the batch exceed this time.
batch_size : int, optional
random_state : np.random.RandomState, optional
Returns
-------
state_obs : np.array
Observations in shape (batch_size, n_dcc, n_obs, n_strains).
"""
random_state = random_state or np.random
t1 = np.asanyarray(t1).reshape((-1, 1, 1, 1))
t2 = np.asanyarray(t2).reshape((-1, 1, 1, 1))
t3 = np.asanyarray(t3).reshape((-1, 1, 1, 1))
if freq_strains_commun is None:
freq_strains_commun = np.full(n_strains, 0.1)
prob_commun = t2 * freq_strains_commun
# the state (infection status) is a 4D tensor for computational performance
state = np.zeros((batch_size, n_dcc, n_ind, n_strains), dtype=np.bool)
# time for each DCC in the batch
time = np.zeros((batch_size, n_dcc))
n_factor = 1. / (n_ind - 1)
gamma = 1. # relative, see paper
ind_b_dcc = [np.repeat(np.arange(batch_size), n_dcc), np.tile(np.arange(n_dcc), batch_size)]
while np.any(time < time_end):
with np.errstate(divide='ignore', invalid='ignore'):
# probability of sampling a strain; in paper: E_s(I(t))
prob_strain_adjust = np.nan_to_num(state / np.sum(state, axis=3, keepdims=True))
prob_strain = np.sum(prob_strain_adjust, axis=2, keepdims=True)
# Which individuals are already infected:
intrainfect_rate = t1 * (np.tile(prob_strain, (1, 1, n_ind, 1)) -
prob_strain_adjust) * n_factor + 1e-9
# init prob to get infected, same for all
hazards = intrainfect_rate + prob_commun # shape (batch_size, n_dcc, 1, n_strains)
# co-infection, depends on the individual's state
# hazards = np.tile(hazards, (1, 1, n_ind, 1))
any_infection = np.any(state, axis=3, keepdims=True)
hazards = np.where(any_infection, t3 * hazards, hazards)
# (relative) probability to be cured
hazards[state] = gamma
# normalize to probabilities
inv_sum_hazards = 1. / np.sum(hazards, axis=(2, 3), keepdims=True)
probs = hazards * inv_sum_hazards
# times until next transition (for each DCC in the batch)
delta_t = random_state.exponential(inv_sum_hazards[:, :, 0, 0])
time = time + delta_t
# choose transition
probs = probs.reshape((batch_size, n_dcc, -1))
cumprobs = np.cumsum(probs[:, :, :-1], axis=2)
x = random_state.uniform(size=(batch_size, n_dcc, 1))
ind_transit = np.sum(x >= cumprobs, axis=2)
# update state, need to find the correct indices first
ind_transit = ind_b_dcc + list(np.unravel_index(ind_transit.ravel(), (n_ind, n_strains)))
state[ind_transit] = np.logical_not(state[ind_transit])
# observation model: simply take the first n_obs individuals
state_obs = state[:, :, :n_obs, :]
return state_obs
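# Usage sketch (illustrative): simulate two batches with the default DCC
# dimensions and inspect the observation tensor:
#     obs = daycare(3.6, 0.6, 0.1, batch_size=2,
#                   random_state=np.random.RandomState(0))
#     obs.shape   # -> (2, 29, 36, 33): (batch, DCC, individual, strain)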
def get_model(true_params=None, seed_obs=None, **kwargs):
"""Return a complete ELFI graph ready for inference.
Selection of true values, priors etc. follows the approach in
Numminen, E., Cheng, L., Gyllenberg, M. and Corander, J.: Estimating the transmission dynamics
of Streptococcus pneumoniae from strain prevalence data, Biometrics, 69, 748-757, 2013.
and
Gutmann M U, Corander J (2016). Bayesian Optimization for Likelihood-Free Inference
of Simulator-Based Statistical Models. JMLR 17(125):1−47, 2016.
Parameters
----------
true_params : list, optional
Parameters with which the observed data is generated.
seed_obs : int, optional
Seed for the observed data generation.
Returns
-------
m : elfi.ElfiModel
"""
logger = logging.getLogger()
if true_params is None:
true_params = [3.6, 0.6, 0.1]
m = elfi.ElfiModel()
y_obs = daycare(*true_params, random_state=np.random.RandomState(seed_obs), **kwargs)
sim_fn = partial(daycare, **kwargs)
priors = []
sumstats = []
priors.append(elfi.Prior('uniform', 0, 11, model=m, name='t1'))
priors.append(elfi.Prior('uniform', 0, 2, model=m, name='t2'))
priors.append(elfi.Prior('uniform', 0, 1, model=m, name='t3'))
elfi.Simulator(sim_fn, *priors, observed=y_obs, name='DCC')
sumstats.append(elfi.Summary(ss_shannon, m['DCC'], name='Shannon'))
sumstats.append(elfi.Summary(ss_strains, m['DCC'], name='n_strains'))
sumstats.append(elfi.Summary(ss_prevalence, m['DCC'], name='prevalence'))
sumstats.append(elfi.Summary(ss_prevalence_multi, m['DCC'], name='multi'))
elfi.Discrepancy(distance, *sumstats, name='d')
elfi.Operation(np.log, m['d'], name='logd')
logger.info("Generated observations with true parameters "
"t1: %.1f, t2: %.3f, t3: %.1f, ", *true_params)
return m
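# Usage sketch (the inference settings here are illustrative assumptions,
# not part of this module):
#     m = get_model(seed_obs=42)
#     rej = elfi.Rejection(m['logd'], batch_size=1)
#     # result = rej.sample(100, quantile=0.1)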
def ss_shannon(data):
r"""Calculate the Shannon index of diversity of the distribution of observed strains.
H = -\sum p \log(p)
https://en.wikipedia.org/wiki/Diversity_index#Shannon_index
Parameters
----------
data : np.array of shape (batch_size, n_dcc, n_obs, n_strains)
Returns
-------
np.array of shape (batch_size, n_dcc)
"""
total_obs = np.sum(data, axis=2, keepdims=True)
with np.errstate(divide='ignore', invalid='ignore'):
proportions = np.nan_to_num(total_obs / np.sum(total_obs, axis=3, keepdims=True))
proportions[proportions == 0] = 1
shannon = (-np.sum(proportions * np.log(proportions), axis=3))[:, :, 0]
return shannon
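# Worked check (illustrative): if exactly two strains are observed in a DCC
# in equal counts, the proportions are (0.5, 0.5), so
#     H = -(0.5*log(0.5) + 0.5*log(0.5)) = log(2) ~= 0.693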
def ss_strains(data):
"""Calculate the number of different strains observed.
Parameters
----------
data : np.array of shape (batch_size, n_dcc, n_obs, n_strains)
Returns
-------
np.array of shape (batch_size, n_dcc)
"""
strain_active = np.any(data, axis=2)
n_strain_obs = np.sum(strain_active, axis=2) # axis 3 is now 2
return n_strain_obs
def ss_prevalence(data):
"""Calculate the prevalence of carriage among the observed individuals.
Parameters
----------
data : np.array of shape (batch_size, n_dcc, n_obs, n_strains)
Returns
-------
np.array of shape (batch_size, n_dcc)
"""
any_infection = np.any(data, axis=3)
n_infected = np.sum(any_infection, axis=2)
return n_infected / data.shape[2]
def ss_prevalence_multi(data):
"""Calculate the prevalence of multiple infections among the observed individuals.
Parameters
----------
data : np.array of shape (batch_size, n_dcc, n_obs, n_strains)
Returns
-------
np.array of shape (batch_size, n_dcc)
"""
n_infections = np.sum(data, axis=3)
n_multi_infections = np.sum(n_infections > 1, axis=2)
return n_multi_infections / data.shape[2]
def distance(*summaries, observed):
"""Calculate an L1-based distance between the simulated and observed summaries.
Follows the simplified single-distance approach in:
Gutmann M U, Corander J (2016). Bayesian Optimization for Likelihood-Free Inference
of Simulator-Based Statistical Models. JMLR 17(125):1−47, 2016.
Parameters
----------
*summaries : k np.arrays of shape (m, n)
observed : list of k np.arrays of shape (1, n)
Returns
-------
np.array of shape (m,)
"""
summaries = np.stack(summaries)
observed = np.stack(observed)
n_ss, _, n_dcc = summaries.shape
obs_max = np.max(observed, axis=2, keepdims=True)
obs_max = np.where(obs_max == 0, 1, obs_max)
y = observed / obs_max
x = summaries / obs_max
# sort to make comparison more robust
y = np.sort(y, axis=2)
x = np.sort(x, axis=2)
# L1 norm divided by the dimension
dist = np.sum(np.abs(x - y), axis=(0, 2)) / (n_ss * n_dcc)
return dist
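# Shape sketch (illustrative): with k summaries, batch size m and n_dcc day
# care centers, `summaries` stacks to (k, m, n_dcc) and `observed` to
# (k, 1, n_dcc); broadcasting over the batch axis then yields one distance
# per simulated parameter set, i.e. an array of shape (m,).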
| bsd-3-clause |
schechter/pykoans | python3/koans/about_multiple_inheritance.py | 96 | 3944 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Slightly based on AboutModules in the Ruby Koans
#
from runner.koan import *
class AboutMultipleInheritance(Koan):
class Nameable:
def __init__(self):
self._name = None
def set_name(self, new_name):
self._name = new_name
def here(self):
return "In Nameable class"
class Animal:
def legs(self):
return 4
def can_climb_walls(self):
return False
def here(self):
return "In Animal class"
class Pig(Animal):
def __init__(self):
super().__init__()
self._name = "Jasper"
@property
def name(self):
return self._name
def speak(self):
return "OINK"
def color(self):
return 'pink'
def here(self):
return "In Pig class"
class Spider(Animal):
def __init__(self):
super().__init__()
self._name = "Boris"
def can_climb_walls(self):
return True
def legs(self):
return 8
def color(self):
return 'black'
def here(self):
return "In Spider class"
class Spiderpig(Pig, Spider, Nameable):
def __init__(self):
super(AboutMultipleInheritance.Pig, self).__init__()
super(AboutMultipleInheritance.Nameable, self).__init__()
self._name = "Jeff"
def speak(self):
return "This looks like a job for Spiderpig!"
def here(self):
return "In Spiderpig class"
#
# Hierarchy:
# Animal
# / \
# Pig Spider Nameable
# \ | /
# Spiderpig
#
# ------------------------------------------------------------------
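    # Background sketch (not one of the koans): every class exposes its MRO,
    # the linearized order Python searches for attributes, e.g.
    #     class A: pass
    #     class B(A): pass
    #     [k.__name__ for k in B.mro()]   # -> ['B', 'A', 'object']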
def test_normal_methods_are_available_in_the_object(self):
jeff = self.Spiderpig()
self.assertRegexpMatches(jeff.speak(), __)
def test_base_class_methods_are_also_available_in_the_object(self):
jeff = self.Spiderpig()
try:
jeff.set_name("Rover")
except:
self.fail("This should not happen")
self.assertEqual(__, jeff.can_climb_walls())
def test_base_class_methods_can_affect_instance_variables_in_the_object(self):
jeff = self.Spiderpig()
self.assertEqual(__, jeff.name)
jeff.set_name("Rover")
self.assertEqual(__, jeff.name)
def test_left_hand_side_inheritance_tends_to_be_higher_priority(self):
jeff = self.Spiderpig()
self.assertEqual(__, jeff.color())
def test_super_class_methods_are_higher_priority_than_super_super_classes(self):
jeff = self.Spiderpig()
self.assertEqual(__, jeff.legs())
def test_we_can_inspect_the_method_resolution_order(self):
#
# MRO = Method Resolution Order
#
mro = type(self.Spiderpig()).mro()
self.assertEqual('Spiderpig', mro[0].__name__)
self.assertEqual('Pig', mro[1].__name__)
self.assertEqual(__, mro[2].__name__)
self.assertEqual(__, mro[3].__name__)
self.assertEqual(__, mro[4].__name__)
self.assertEqual(__, mro[5].__name__)
def test_confirm_the_mro_controls_the_calling_order(self):
jeff = self.Spiderpig()
self.assertRegexpMatches(jeff.here(), 'Spiderpig')
next = super(AboutMultipleInheritance.Spiderpig, jeff)
self.assertRegexpMatches(next.here(), 'Pig')
next = super(AboutMultipleInheritance.Pig, jeff)
self.assertRegexpMatches(next.here(), __)
# Hang on a minute?!? That last class name might be a super class of
        # the 'jeff' object, but it's hardly a superclass of Pig, is it?
#
# To avoid confusion it may help to think of super() as next_mro().
| mit |
GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/networkx/algorithms/distance_regular.py | 72 | 5409 | """
=======================
Distance-regular graphs
=======================
"""
# Copyright (C) 2011 by
# Dheeraj M R <[email protected]>
# Aric Hagberg <[email protected]>
# All rights reserved.
# BSD license.
import networkx as nx
__author__ = """\n""".join(['Dheeraj M R <[email protected]>',
'Aric Hagberg <[email protected]>'])
__all__ = ['is_distance_regular','intersection_array','global_parameters']
def is_distance_regular(G):
"""Returns True if the graph is distance regular, False otherwise.
A connected graph G is distance-regular if for any nodes x,y
and any integers i,j=0,1,...,d (where d is the graph
diameter), the number of vertices at distance i from x and
distance j from y depends only on i,j and the graph distance
between x and y, independently of the choice of x and y.
Parameters
----------
G: Networkx graph (undirected)
Returns
-------
bool
True if the graph is Distance Regular, False otherwise
Examples
--------
>>> G=nx.hypercube_graph(6)
>>> nx.is_distance_regular(G)
True
See Also
--------
intersection_array, global_parameters
Notes
-----
For undirected and simple graphs only
References
----------
.. [1] Brouwer, A. E.; Cohen, A. M.; and Neumaier, A.
Distance-Regular Graphs. New York: Springer-Verlag, 1989.
.. [2] Weisstein, Eric W. "Distance-Regular Graph."
http://mathworld.wolfram.com/Distance-RegularGraph.html
"""
try:
a=intersection_array(G)
return True
except nx.NetworkXError:
return False
def global_parameters(b,c):
"""Return global parameters for a given intersection array.
    Given a distance-regular graph G with integers b_i, c_i, i = 0, ..., d
such that for any 2 vertices x,y in G at a distance i=d(x,y), there
are exactly c_i neighbors of y at a distance of i-1 from x and b_i
neighbors of y at a distance of i+1 from x.
Thus, a distance regular graph has the global parameters,
[[c_0,a_0,b_0],[c_1,a_1,b_1],......,[c_d,a_d,b_d]] for the
intersection array [b_0,b_1,.....b_{d-1};c_1,c_2,.....c_d]
where a_i+b_i+c_i=k , k= degree of every vertex.
Parameters
----------
b,c: tuple of lists
Returns
-------
p : list of three-tuples
Examples
--------
>>> G=nx.dodecahedral_graph()
>>> b,c=nx.intersection_array(G)
>>> list(nx.global_parameters(b,c))
[(0, 0, 3), (1, 0, 2), (1, 1, 1), (1, 1, 1), (2, 0, 1), (3, 0, 0)]
References
----------
.. [1] Weisstein, Eric W. "Global Parameters."
From MathWorld--A Wolfram Web Resource.
http://mathworld.wolfram.com/GlobalParameters.html
See Also
--------
intersection_array
"""
d=len(b)
ba=b[:]
ca=c[:]
ba.append(0)
ca.insert(0,0)
k = ba[0]
aa = [k-x-y for x,y in zip(ba,ca)]
return zip(*[ca,aa,ba])
def intersection_array(G):
"""Returns the intersection array of a distance-regular graph.
    Given a distance-regular graph G with integers b_i, c_i, i = 0, ..., d
such that for any 2 vertices x,y in G at a distance i=d(x,y), there
are exactly c_i neighbors of y at a distance of i-1 from x and b_i
neighbors of y at a distance of i+1 from x.
    A distance-regular graph's intersection array is given by,
[b_0,b_1,.....b_{d-1};c_1,c_2,.....c_d]
Parameters
----------
G: Networkx graph (undirected)
Returns
-------
b,c: tuple of lists
Examples
--------
>>> G=nx.icosahedral_graph()
>>> nx.intersection_array(G)
([5, 2, 1], [1, 2, 5])
References
----------
.. [1] Weisstein, Eric W. "Intersection Array."
From MathWorld--A Wolfram Web Resource.
http://mathworld.wolfram.com/IntersectionArray.html
See Also
--------
global_parameters
"""
if G.is_multigraph() or G.is_directed():
        raise nx.NetworkXException('Not implemented for directed '
                                   'or multiedge graphs.')
# test for regular graph (all degrees must be equal)
degree = G.degree_iter()
(_,k) = next(degree)
for _,knext in degree:
if knext != k:
raise nx.NetworkXError('Graph is not distance regular.')
k = knext
path_length = nx.all_pairs_shortest_path_length(G)
diameter = max([max(path_length[n].values()) for n in path_length])
bint = {} # 'b' intersection array
cint = {} # 'c' intersection array
for u in G:
for v in G:
try:
i = path_length[u][v]
except KeyError: # graph must be connected
raise nx.NetworkXError('Graph is not distance regular.')
# number of neighbors of v at a distance of i-1 from u
c = len([n for n in G[v] if path_length[n][u]==i-1])
# number of neighbors of v at a distance of i+1 from u
b = len([n for n in G[v] if path_length[n][u]==i+1])
# b,c are independent of u and v
if cint.get(i,c) != c or bint.get(i,b) != b:
raise nx.NetworkXError('Graph is not distance regular')
bint[i] = b
cint[i] = c
return ([bint.get(i,0) for i in range(diameter)],
[cint.get(i+1,0) for i in range(diameter)])
| agpl-3.0 |
imsparsh/python-for-android | python3-alpha/python3-src/Lib/ctypes/test/test_python_api.py | 50 | 3066 | from ctypes import *
import unittest, sys
from ctypes.test import is_resource_enabled
################################################################
# This section should be moved into ctypes\__init__.py, when it's ready.
from _ctypes import PyObj_FromPtr
################################################################
from sys import getrefcount as grc
if sys.version_info > (2, 4):
c_py_ssize_t = c_size_t
else:
c_py_ssize_t = c_int
class PythonAPITestCase(unittest.TestCase):
def test_PyBytes_FromStringAndSize(self):
PyBytes_FromStringAndSize = pythonapi.PyBytes_FromStringAndSize
PyBytes_FromStringAndSize.restype = py_object
PyBytes_FromStringAndSize.argtypes = c_char_p, c_py_ssize_t
self.assertEqual(PyBytes_FromStringAndSize(b"abcdefghi", 3), b"abc")
def test_PyString_FromString(self):
pythonapi.PyBytes_FromString.restype = py_object
pythonapi.PyBytes_FromString.argtypes = (c_char_p,)
s = b"abc"
refcnt = grc(s)
pyob = pythonapi.PyBytes_FromString(s)
self.assertEqual(grc(s), refcnt)
self.assertEqual(s, pyob)
del pyob
self.assertEqual(grc(s), refcnt)
if is_resource_enabled("refcount"):
# This test is unreliable, because it is possible that code in
# unittest changes the refcount of the '42' integer. So, it
# is disabled by default.
def test_PyLong_Long(self):
ref42 = grc(42)
pythonapi.PyLong_FromLong.restype = py_object
self.assertEqual(pythonapi.PyLong_FromLong(42), 42)
self.assertEqual(grc(42), ref42)
pythonapi.PyLong_AsLong.argtypes = (py_object,)
pythonapi.PyLong_AsLong.restype = c_long
res = pythonapi.PyLong_AsLong(42)
self.assertEqual(grc(res), ref42 + 1)
del res
self.assertEqual(grc(42), ref42)
def test_PyObj_FromPtr(self):
s = "abc def ghi jkl"
ref = grc(s)
# id(python-object) is the address
pyobj = PyObj_FromPtr(id(s))
self.assertTrue(s is pyobj)
self.assertEqual(grc(s), ref + 1)
del pyobj
self.assertEqual(grc(s), ref)
def test_PyOS_snprintf(self):
PyOS_snprintf = pythonapi.PyOS_snprintf
PyOS_snprintf.argtypes = POINTER(c_char), c_size_t, c_char_p
buf = c_buffer(256)
PyOS_snprintf(buf, sizeof(buf), b"Hello from %s", b"ctypes")
self.assertEqual(buf.value, b"Hello from ctypes")
PyOS_snprintf(buf, sizeof(buf), b"Hello from %s (%d, %d, %d)", b"ctypes", 1, 2, 3)
self.assertEqual(buf.value, b"Hello from ctypes (1, 2, 3)")
# not enough arguments
self.assertRaises(TypeError, PyOS_snprintf, buf)
def test_pyobject_repr(self):
self.assertEqual(repr(py_object()), "py_object(<NULL>)")
self.assertEqual(repr(py_object(42)), "py_object(42)")
self.assertEqual(repr(py_object(object)), "py_object(%r)" % object)
if __name__ == "__main__":
unittest.main()
| apache-2.0 |
Tinkerforge/brickv | src/brickv/plugin_system/plugins/red/program_page_ruby.py | 1 | 10155 | # -*- coding: utf-8 -*-
"""
RED Plugin
Copyright (C) 2014-2015 Matthias Bolte <[email protected]>
Copyright (C) 2014 Olaf Lüke <[email protected]>
program_page_ruby.py: Program Wizard Ruby Page
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public
License along with this program; if not, write to the
Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.
"""
import html
from brickv.plugin_system.plugins.red.program_page import ProgramPage
from brickv.plugin_system.plugins.red.program_utils import *
from brickv.plugin_system.plugins.red.ui_program_page_ruby import Ui_ProgramPageRuby
from brickv.plugin_system.plugins.red.script_manager import check_script_result
def get_ruby_versions(script_manager, callback):
def cb_versions(result):
okay, _ = check_script_result(result)
if okay:
try:
version = result.stdout.split('\n')[0].split(' ')[1]
callback([ExecutableVersion('/usr/bin/ruby', version)])
return
except:
pass
# Could not get versions, we assume that some version of ruby 1.9 is installed
callback([ExecutableVersion('/usr/bin/ruby', '1.9')])
script_manager.execute_script('ruby_versions', cb_versions)
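# Parsing sketch (illustrative output): `ruby --version` prints a line like
#     ruby 1.9.3p194 (2012-04-20 revision 35410) [arm-linux-eabihf]
# so the split(' ')[1] above extracts '1.9.3p194' as the version.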
class ProgramPageRuby(ProgramPage, Ui_ProgramPageRuby):
def __init__(self, title_prefix=''):
ProgramPage.__init__(self)
self.setupUi(self)
self.language = Constants.LANGUAGE_RUBY
self.setTitle('{0}{1} Configuration'.format(title_prefix, Constants.language_display_names[self.language]))
self.registerField('ruby.version', self.combo_version)
self.registerField('ruby.start_mode', self.combo_start_mode)
self.registerField('ruby.script_file', self.combo_script_file, 'currentText')
self.registerField('ruby.command', self.edit_command)
self.registerField('ruby.working_directory', self.combo_working_directory, 'currentText')
self.combo_start_mode.currentIndexChanged.connect(self.update_ui_state)
self.combo_start_mode.currentIndexChanged.connect(self.completeChanged.emit)
self.check_show_advanced_options.stateChanged.connect(self.update_ui_state)
self.label_spacer.setText('')
self.combo_script_file_selector = MandatoryTypedFileSelector(self,
self.label_script_file,
self.combo_script_file,
self.label_script_file_type,
self.combo_script_file_type,
self.label_script_file_help)
self.edit_command_checker = MandatoryLineEditChecker(self,
self.label_command,
self.edit_command)
self.combo_working_directory_selector = MandatoryDirectorySelector(self,
self.label_working_directory,
self.combo_working_directory)
self.option_list_editor = ListWidgetEditor(self.label_options,
self.list_options,
self.label_options_help,
self.button_add_option,
self.button_remove_option,
self.button_edit_option,
self.button_up_option,
self.button_down_option,
'<new Ruby option {0}>')
# overrides QWizardPage.initializePage
def initializePage(self):
self.set_formatted_sub_title('Specify how the {language} program [{name}] should be executed.')
self.update_combo_version('ruby', self.combo_version)
self.combo_start_mode.setCurrentIndex(Constants.DEFAULT_RUBY_START_MODE)
self.combo_script_file_selector.reset()
self.check_show_advanced_options.setChecked(False)
self.combo_working_directory_selector.reset()
self.option_list_editor.reset()
# if a program exists then this page is used in an edit wizard
program = self.wizard().program
if program != None:
# start mode
start_mode_api_name = program.cast_custom_option_value('ruby.start_mode', str, '<unknown>')
start_mode = Constants.get_ruby_start_mode(start_mode_api_name)
self.combo_start_mode.setCurrentIndex(start_mode)
# script file
self.combo_script_file_selector.set_current_text(program.cast_custom_option_value('ruby.script_file', str, ''))
# command
self.edit_command.setText(program.cast_custom_option_value('ruby.command', str, ''))
# working directory
self.combo_working_directory_selector.set_current_text(program.working_directory)
# options
self.option_list_editor.clear()
for option in program.cast_custom_option_value_list('ruby.options', str, []):
self.option_list_editor.add_item(option)
self.update_ui_state()
# overrides QWizardPage.isComplete
def isComplete(self):
executable = self.get_executable()
start_mode = self.get_field('ruby.start_mode')
if len(executable) == 0:
return False
if start_mode == Constants.RUBY_START_MODE_SCRIPT_FILE and \
not self.combo_script_file_selector.complete:
return False
if start_mode == Constants.RUBY_START_MODE_COMMAND and \
not self.edit_command_checker.complete:
return False
return self.combo_working_directory_selector.complete and ProgramPage.isComplete(self)
# overrides ProgramPage.update_ui_state
def update_ui_state(self):
start_mode = self.get_field('ruby.start_mode')
start_mode_script_file = start_mode == Constants.RUBY_START_MODE_SCRIPT_FILE
start_mode_command = start_mode == Constants.RUBY_START_MODE_COMMAND
show_advanced_options = self.check_show_advanced_options.isChecked()
self.combo_script_file_selector.set_visible(start_mode_script_file)
self.label_command.setVisible(start_mode_command)
self.edit_command.setVisible(start_mode_command)
self.label_command_help.setVisible(start_mode_command)
self.combo_working_directory_selector.set_visible(show_advanced_options)
self.option_list_editor.set_visible(show_advanced_options)
self.label_spacer.setVisible(not show_advanced_options)
self.option_list_editor.update_ui_state()
def get_executable(self):
return self.combo_version.itemData(self.get_field('ruby.version'))
def get_html_summary(self):
version = self.get_field('ruby.version')
start_mode = self.get_field('ruby.start_mode')
script_file = self.get_field('ruby.script_file')
command = self.get_field('ruby.command')
working_directory = self.get_field('ruby.working_directory')
options = ' '.join(self.option_list_editor.get_items())
html_text = 'Ruby Version: {0}<br/>'.format(html.escape(self.combo_version.itemText(version)))
html_text += 'Start Mode: {0}<br/>'.format(html.escape(Constants.ruby_start_mode_display_names[start_mode]))
        if start_mode == Constants.RUBY_START_MODE_SCRIPT_FILE:
            html_text += 'Script File: {0}<br/>'.format(html.escape(script_file))
        elif start_mode == Constants.RUBY_START_MODE_COMMAND:
html_text += 'Command: {0}<br/>'.format(html.escape(command))
html_text += 'Working Directory: {0}<br/>'.format(html.escape(working_directory))
html_text += 'Ruby Options: {0}<br/>'.format(html.escape(options))
return html_text
def get_custom_options(self):
return {
'ruby.start_mode': Constants.ruby_start_mode_api_names[self.get_field('ruby.start_mode')],
'ruby.script_file': self.get_field('ruby.script_file'),
'ruby.command': self.get_field('ruby.command'),
'ruby.options': self.option_list_editor.get_items()
}
def get_command(self):
executable = self.get_executable()
arguments = self.option_list_editor.get_items()
environment = []
start_mode = self.get_field('ruby.start_mode')
if start_mode == Constants.RUBY_START_MODE_SCRIPT_FILE:
arguments.append(self.get_field('ruby.script_file'))
elif start_mode == Constants.RUBY_START_MODE_COMMAND:
arguments.append('-e')
arguments.append(self.get_field('ruby.command'))
working_directory = self.get_field('ruby.working_directory')
return executable, arguments, environment, working_directory
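    # Composition sketch (values are illustrative): in "script file" mode with
    # options ['-w'] and script file 'main.rb', get_command() returns roughly
    #     ('/usr/bin/ruby', ['-w', 'main.rb'], [], <working directory>)
    # while in "command" mode the script file is replaced by ['-e', <command>].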
def apply_program_changes(self):
self.apply_program_custom_options_and_command_changes()
| gpl-2.0 |
BaichuanWu/Blog_on_django | site-packages/pip/_vendor/requests/packages/chardet/euckrfreq.py | 3121 | 45978 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# Sampling from about 20M text materials include literature and computer technology
# 128 --> 0.79
# 256 --> 0.92
# 512 --> 0.986
# 1024 --> 0.99944
# 2048 --> 0.99999
#
# Ideal Distribution Ratio = 0.98653 / (1-0.98653) = 73.24
# Random Distribution Ratio = 512 / (2350-512) = 0.279.
#
# Typical Distribution Ratio
EUCKR_TYPICAL_DISTRIBUTION_RATIO = 6.0
EUCKR_TABLE_SIZE = 2352
# Char to FreqOrder table ,
EUCKRCharToFreqOrder = ( \
13, 130, 120,1396, 481,1719,1720, 328, 609, 212,1721, 707, 400, 299,1722, 87,
1397,1723, 104, 536,1117,1203,1724,1267, 685,1268, 508,1725,1726,1727,1728,1398,
1399,1729,1730,1731, 141, 621, 326,1057, 368,1732, 267, 488, 20,1733,1269,1734,
945,1400,1735, 47, 904,1270,1736,1737, 773, 248,1738, 409, 313, 786, 429,1739,
116, 987, 813,1401, 683, 75,1204, 145,1740,1741,1742,1743, 16, 847, 667, 622,
708,1744,1745,1746, 966, 787, 304, 129,1747, 60, 820, 123, 676,1748,1749,1750,
1751, 617,1752, 626,1753,1754,1755,1756, 653,1757,1758,1759,1760,1761,1762, 856,
344,1763,1764,1765,1766, 89, 401, 418, 806, 905, 848,1767,1768,1769, 946,1205,
709,1770,1118,1771, 241,1772,1773,1774,1271,1775, 569,1776, 999,1777,1778,1779,
1780, 337, 751,1058, 28, 628, 254,1781, 177, 906, 270, 349, 891,1079,1782, 19,
1783, 379,1784, 315,1785, 629, 754,1402, 559,1786, 636, 203,1206,1787, 710, 567,
1788, 935, 814,1789,1790,1207, 766, 528,1791,1792,1208,1793,1794,1795,1796,1797,
1403,1798,1799, 533,1059,1404,1405,1156,1406, 936, 884,1080,1800, 351,1801,1802,
1803,1804,1805, 801,1806,1807,1808,1119,1809,1157, 714, 474,1407,1810, 298, 899,
885,1811,1120, 802,1158,1812, 892,1813,1814,1408, 659,1815,1816,1121,1817,1818,
1819,1820,1821,1822, 319,1823, 594, 545,1824, 815, 937,1209,1825,1826, 573,1409,
1022,1827,1210,1828,1829,1830,1831,1832,1833, 556, 722, 807,1122,1060,1834, 697,
1835, 900, 557, 715,1836,1410, 540,1411, 752,1159, 294, 597,1211, 976, 803, 770,
1412,1837,1838, 39, 794,1413, 358,1839, 371, 925,1840, 453, 661, 788, 531, 723,
544,1023,1081, 869, 91,1841, 392, 430, 790, 602,1414, 677,1082, 457,1415,1416,
1842,1843, 475, 327,1024,1417, 795, 121,1844, 733, 403,1418,1845,1846,1847, 300,
119, 711,1212, 627,1848,1272, 207,1849,1850, 796,1213, 382,1851, 519,1852,1083,
893,1853,1854,1855, 367, 809, 487, 671,1856, 663,1857,1858, 956, 471, 306, 857,
1859,1860,1160,1084,1861,1862,1863,1864,1865,1061,1866,1867,1868,1869,1870,1871,
282, 96, 574,1872, 502,1085,1873,1214,1874, 907,1875,1876, 827, 977,1419,1420,
1421, 268,1877,1422,1878,1879,1880, 308,1881, 2, 537,1882,1883,1215,1884,1885,
127, 791,1886,1273,1423,1887, 34, 336, 404, 643,1888, 571, 654, 894, 840,1889,
0, 886,1274, 122, 575, 260, 908, 938,1890,1275, 410, 316,1891,1892, 100,1893,
1894,1123, 48,1161,1124,1025,1895, 633, 901,1276,1896,1897, 115, 816,1898, 317,
1899, 694,1900, 909, 734,1424, 572, 866,1425, 691, 85, 524,1010, 543, 394, 841,
1901,1902,1903,1026,1904,1905,1906,1907,1908,1909, 30, 451, 651, 988, 310,1910,
1911,1426, 810,1216, 93,1912,1913,1277,1217,1914, 858, 759, 45, 58, 181, 610,
269,1915,1916, 131,1062, 551, 443,1000, 821,1427, 957, 895,1086,1917,1918, 375,
1919, 359,1920, 687,1921, 822,1922, 293,1923,1924, 40, 662, 118, 692, 29, 939,
887, 640, 482, 174,1925, 69,1162, 728,1428, 910,1926,1278,1218,1279, 386, 870,
217, 854,1163, 823,1927,1928,1929,1930, 834,1931, 78,1932, 859,1933,1063,1934,
1935,1936,1937, 438,1164, 208, 595,1938,1939,1940,1941,1219,1125,1942, 280, 888,
1429,1430,1220,1431,1943,1944,1945,1946,1947,1280, 150, 510,1432,1948,1949,1950,
1951,1952,1953,1954,1011,1087,1955,1433,1043,1956, 881,1957, 614, 958,1064,1065,
1221,1958, 638,1001, 860, 967, 896,1434, 989, 492, 553,1281,1165,1959,1282,1002,
1283,1222,1960,1961,1962,1963, 36, 383, 228, 753, 247, 454,1964, 876, 678,1965,
1966,1284, 126, 464, 490, 835, 136, 672, 529, 940,1088,1435, 473,1967,1968, 467,
50, 390, 227, 587, 279, 378, 598, 792, 968, 240, 151, 160, 849, 882,1126,1285,
639,1044, 133, 140, 288, 360, 811, 563,1027, 561, 142, 523,1969,1970,1971, 7,
103, 296, 439, 407, 506, 634, 990,1972,1973,1974,1975, 645,1976,1977,1978,1979,
1980,1981, 236,1982,1436,1983,1984,1089, 192, 828, 618, 518,1166, 333,1127,1985,
818,1223,1986,1987,1988,1989,1990,1991,1992,1993, 342,1128,1286, 746, 842,1994,
1995, 560, 223,1287, 98, 8, 189, 650, 978,1288,1996,1437,1997, 17, 345, 250,
423, 277, 234, 512, 226, 97, 289, 42, 167,1998, 201,1999,2000, 843, 836, 824,
532, 338, 783,1090, 182, 576, 436,1438,1439, 527, 500,2001, 947, 889,2002,2003,
2004,2005, 262, 600, 314, 447,2006, 547,2007, 693, 738,1129,2008, 71,1440, 745,
619, 688,2009, 829,2010,2011, 147,2012, 33, 948,2013,2014, 74, 224,2015, 61,
191, 918, 399, 637,2016,1028,1130, 257, 902,2017,2018,2019,2020,2021,2022,2023,
2024,2025,2026, 837,2027,2028,2029,2030, 179, 874, 591, 52, 724, 246,2031,2032,
2033,2034,1167, 969,2035,1289, 630, 605, 911,1091,1168,2036,2037,2038,1441, 912,
2039, 623,2040,2041, 253,1169,1290,2042,1442, 146, 620, 611, 577, 433,2043,1224,
719,1170, 959, 440, 437, 534, 84, 388, 480,1131, 159, 220, 198, 679,2044,1012,
819,1066,1443, 113,1225, 194, 318,1003,1029,2045,2046,2047,2048,1067,2049,2050,
2051,2052,2053, 59, 913, 112,2054, 632,2055, 455, 144, 739,1291,2056, 273, 681,
499,2057, 448,2058,2059, 760,2060,2061, 970, 384, 169, 245,1132,2062,2063, 414,
1444,2064,2065, 41, 235,2066, 157, 252, 877, 568, 919, 789, 580,2067, 725,2068,
2069,1292,2070,2071,1445,2072,1446,2073,2074, 55, 588, 66,1447, 271,1092,2075,
1226,2076, 960,1013, 372,2077,2078,2079,2080,2081,1293,2082,2083,2084,2085, 850,
2086,2087,2088,2089,2090, 186,2091,1068, 180,2092,2093,2094, 109,1227, 522, 606,
2095, 867,1448,1093, 991,1171, 926, 353,1133,2096, 581,2097,2098,2099,1294,1449,
1450,2100, 596,1172,1014,1228,2101,1451,1295,1173,1229,2102,2103,1296,1134,1452,
949,1135,2104,2105,1094,1453,1454,1455,2106,1095,2107,2108,2109,2110,2111,2112,
2113,2114,2115,2116,2117, 804,2118,2119,1230,1231, 805,1456, 405,1136,2120,2121,
2122,2123,2124, 720, 701,1297, 992,1457, 927,1004,2125,2126,2127,2128,2129,2130,
22, 417,2131, 303,2132, 385,2133, 971, 520, 513,2134,1174, 73,1096, 231, 274,
962,1458, 673,2135,1459,2136, 152,1137,2137,2138,2139,2140,1005,1138,1460,1139,
2141,2142,2143,2144, 11, 374, 844,2145, 154,1232, 46,1461,2146, 838, 830, 721,
1233, 106,2147, 90, 428, 462, 578, 566,1175, 352,2148,2149, 538,1234, 124,1298,
2150,1462, 761, 565,2151, 686,2152, 649,2153, 72, 173,2154, 460, 415,2155,1463,
2156,1235, 305,2157,2158,2159,2160,2161,2162, 579,2163,2164,2165,2166,2167, 747,
2168,2169,2170,2171,1464, 669,2172,2173,2174,2175,2176,1465,2177, 23, 530, 285,
2178, 335, 729,2179, 397,2180,2181,2182,1030,2183,2184, 698,2185,2186, 325,2187,
2188, 369,2189, 799,1097,1015, 348,2190,1069, 680,2191, 851,1466,2192,2193, 10,
2194, 613, 424,2195, 979, 108, 449, 589, 27, 172, 81,1031, 80, 774, 281, 350,
1032, 525, 301, 582,1176,2196, 674,1045,2197,2198,1467, 730, 762,2199,2200,2201,
2202,1468,2203, 993,2204,2205, 266,1070, 963,1140,2206,2207,2208, 664,1098, 972,
2209,2210,2211,1177,1469,1470, 871,2212,2213,2214,2215,2216,1471,2217,2218,2219,
2220,2221,2222,2223,2224,2225,2226,2227,1472,1236,2228,2229,2230,2231,2232,2233,
2234,2235,1299,2236,2237, 200,2238, 477, 373,2239,2240, 731, 825, 777,2241,2242,
2243, 521, 486, 548,2244,2245,2246,1473,1300, 53, 549, 137, 875, 76, 158,2247,
1301,1474, 469, 396,1016, 278, 712,2248, 321, 442, 503, 767, 744, 941,1237,1178,
1475,2249, 82, 178,1141,1179, 973,2250,1302,2251, 297,2252,2253, 570,2254,2255,
2256, 18, 450, 206,2257, 290, 292,1142,2258, 511, 162, 99, 346, 164, 735,2259,
1476,1477, 4, 554, 343, 798,1099,2260,1100,2261, 43, 171,1303, 139, 215,2262,
2263, 717, 775,2264,1033, 322, 216,2265, 831,2266, 149,2267,1304,2268,2269, 702,
1238, 135, 845, 347, 309,2270, 484,2271, 878, 655, 238,1006,1478,2272, 67,2273,
295,2274,2275, 461,2276, 478, 942, 412,2277,1034,2278,2279,2280, 265,2281, 541,
2282,2283,2284,2285,2286, 70, 852,1071,2287,2288,2289,2290, 21, 56, 509, 117,
432,2291,2292, 331, 980, 552,1101, 148, 284, 105, 393,1180,1239, 755,2293, 187,
2294,1046,1479,2295, 340,2296, 63,1047, 230,2297,2298,1305, 763,1306, 101, 800,
808, 494,2299,2300,2301, 903,2302, 37,1072, 14, 5,2303, 79, 675,2304, 312,
2305,2306,2307,2308,2309,1480, 6,1307,2310,2311,2312, 1, 470, 35, 24, 229,
2313, 695, 210, 86, 778, 15, 784, 592, 779, 32, 77, 855, 964,2314, 259,2315,
501, 380,2316,2317, 83, 981, 153, 689,1308,1481,1482,1483,2318,2319, 716,1484,
2320,2321,2322,2323,2324,2325,1485,2326,2327, 128, 57, 68, 261,1048, 211, 170,
1240, 31,2328, 51, 435, 742,2329,2330,2331, 635,2332, 264, 456,2333,2334,2335,
425,2336,1486, 143, 507, 263, 943,2337, 363, 920,1487, 256,1488,1102, 243, 601,
1489,2338,2339,2340,2341,2342,2343,2344, 861,2345,2346,2347,2348,2349,2350, 395,
2351,1490,1491, 62, 535, 166, 225,2352,2353, 668, 419,1241, 138, 604, 928,2354,
1181,2355,1492,1493,2356,2357,2358,1143,2359, 696,2360, 387, 307,1309, 682, 476,
2361,2362, 332, 12, 222, 156,2363, 232,2364, 641, 276, 656, 517,1494,1495,1035,
416, 736,1496,2365,1017, 586,2366,2367,2368,1497,2369, 242,2370,2371,2372,1498,
2373, 965, 713,2374,2375,2376,2377, 740, 982,1499, 944,1500,1007,2378,2379,1310,
1501,2380,2381,2382, 785, 329,2383,2384,1502,2385,2386,2387, 932,2388,1503,2389,
2390,2391,2392,1242,2393,2394,2395,2396,2397, 994, 950,2398,2399,2400,2401,1504,
1311,2402,2403,2404,2405,1049, 749,2406,2407, 853, 718,1144,1312,2408,1182,1505,
2409,2410, 255, 516, 479, 564, 550, 214,1506,1507,1313, 413, 239, 444, 339,1145,
1036,1508,1509,1314,1037,1510,1315,2411,1511,2412,2413,2414, 176, 703, 497, 624,
593, 921, 302,2415, 341, 165,1103,1512,2416,1513,2417,2418,2419, 376,2420, 700,
2421,2422,2423, 258, 768,1316,2424,1183,2425, 995, 608,2426,2427,2428,2429, 221,
2430,2431,2432,2433,2434,2435,2436,2437, 195, 323, 726, 188, 897, 983,1317, 377,
644,1050, 879,2438, 452,2439,2440,2441,2442,2443,2444, 914,2445,2446,2447,2448,
915, 489,2449,1514,1184,2450,2451, 515, 64, 427, 495,2452, 583,2453, 483, 485,
1038, 562, 213,1515, 748, 666,2454,2455,2456,2457, 334,2458, 780, 996,1008, 705,
1243,2459,2460,2461,2462,2463, 114,2464, 493,1146, 366, 163,1516, 961,1104,2465,
291,2466,1318,1105,2467,1517, 365,2468, 355, 951,1244,2469,1319,2470, 631,2471,
2472, 218,1320, 364, 320, 756,1518,1519,1321,1520,1322,2473,2474,2475,2476, 997,
2477,2478,2479,2480, 665,1185,2481, 916,1521,2482,2483,2484, 584, 684,2485,2486,
797,2487,1051,1186,2488,2489,2490,1522,2491,2492, 370,2493,1039,1187, 65,2494,
434, 205, 463,1188,2495, 125, 812, 391, 402, 826, 699, 286, 398, 155, 781, 771,
585,2496, 590, 505,1073,2497, 599, 244, 219, 917,1018, 952, 646,1523,2498,1323,
2499,2500, 49, 984, 354, 741,2501, 625,2502,1324,2503,1019, 190, 357, 757, 491,
95, 782, 868,2504,2505,2506,2507,2508,2509, 134,1524,1074, 422,1525, 898,2510,
161,2511,2512,2513,2514, 769,2515,1526,2516,2517, 411,1325,2518, 472,1527,2519,
2520,2521,2522,2523,2524, 985,2525,2526,2527,2528,2529,2530, 764,2531,1245,2532,
2533, 25, 204, 311,2534, 496,2535,1052,2536,2537,2538,2539,2540,2541,2542, 199,
704, 504, 468, 758, 657,1528, 196, 44, 839,1246, 272, 750,2543, 765, 862,2544,
2545,1326,2546, 132, 615, 933,2547, 732,2548,2549,2550,1189,1529,2551, 283,1247,
1053, 607, 929,2552,2553,2554, 930, 183, 872, 616,1040,1147,2555,1148,1020, 441,
249,1075,2556,2557,2558, 466, 743,2559,2560,2561, 92, 514, 426, 420, 526,2562,
2563,2564,2565,2566,2567,2568, 185,2569,2570,2571,2572, 776,1530, 658,2573, 362,
2574, 361, 922,1076, 793,2575,2576,2577,2578,2579,2580,1531, 251,2581,2582,2583,
2584,1532, 54, 612, 237,1327,2585,2586, 275, 408, 647, 111,2587,1533,1106, 465,
3, 458, 9, 38,2588, 107, 110, 890, 209, 26, 737, 498,2589,1534,2590, 431,
202, 88,1535, 356, 287,1107, 660,1149,2591, 381,1536, 986,1150, 445,1248,1151,
974,2592,2593, 846,2594, 446, 953, 184,1249,1250, 727,2595, 923, 193, 883,2596,
2597,2598, 102, 324, 539, 817,2599, 421,1041,2600, 832,2601, 94, 175, 197, 406,
2602, 459,2603,2604,2605,2606,2607, 330, 555,2608,2609,2610, 706,1108, 389,2611,
2612,2613,2614, 233,2615, 833, 558, 931, 954,1251,2616,2617,1537, 546,2618,2619,
1009,2620,2621,2622,1538, 690,1328,2623, 955,2624,1539,2625,2626, 772,2627,2628,
2629,2630,2631, 924, 648, 863, 603,2632,2633, 934,1540, 864, 865,2634, 642,1042,
670,1190,2635,2636,2637,2638, 168,2639, 652, 873, 542,1054,1541,2640,2641,2642, # 512, 256
#Everything below is of no interest for detection purpose
2643,2644,2645,2646,2647,2648,2649,2650,2651,2652,2653,2654,2655,2656,2657,2658,
2659,2660,2661,2662,2663,2664,2665,2666,2667,2668,2669,2670,2671,2672,2673,2674,
2675,2676,2677,2678,2679,2680,2681,2682,2683,2684,2685,2686,2687,2688,2689,2690,
2691,2692,2693,2694,2695,2696,2697,2698,2699,1542, 880,2700,2701,2702,2703,2704,
2705,2706,2707,2708,2709,2710,2711,2712,2713,2714,2715,2716,2717,2718,2719,2720,
2721,2722,2723,2724,2725,1543,2726,2727,2728,2729,2730,2731,2732,1544,2733,2734,
2735,2736,2737,2738,2739,2740,2741,2742,2743,2744,2745,2746,2747,2748,2749,2750,
2751,2752,2753,2754,1545,2755,2756,2757,2758,2759,2760,2761,2762,2763,2764,2765,
2766,1546,2767,1547,2768,2769,2770,2771,2772,2773,2774,2775,2776,2777,2778,2779,
2780,2781,2782,2783,2784,2785,2786,1548,2787,2788,2789,1109,2790,2791,2792,2793,
2794,2795,2796,2797,2798,2799,2800,2801,2802,2803,2804,2805,2806,2807,2808,2809,
2810,2811,2812,1329,2813,2814,2815,2816,2817,2818,2819,2820,2821,2822,2823,2824,
2825,2826,2827,2828,2829,2830,2831,2832,2833,2834,2835,2836,2837,2838,2839,2840,
2841,2842,2843,2844,2845,2846,2847,2848,2849,2850,2851,2852,2853,2854,2855,2856,
1549,2857,2858,2859,2860,1550,2861,2862,1551,2863,2864,2865,2866,2867,2868,2869,
2870,2871,2872,2873,2874,1110,1330,2875,2876,2877,2878,2879,2880,2881,2882,2883,
2884,2885,2886,2887,2888,2889,2890,2891,2892,2893,2894,2895,2896,2897,2898,2899,
2900,2901,2902,2903,2904,2905,2906,2907,2908,2909,2910,2911,2912,2913,2914,2915,
2916,2917,2918,2919,2920,2921,2922,2923,2924,2925,2926,2927,2928,2929,2930,1331,
2931,2932,2933,2934,2935,2936,2937,2938,2939,2940,2941,2942,2943,1552,2944,2945,
2946,2947,2948,2949,2950,2951,2952,2953,2954,2955,2956,2957,2958,2959,2960,2961,
2962,2963,2964,1252,2965,2966,2967,2968,2969,2970,2971,2972,2973,2974,2975,2976,
2977,2978,2979,2980,2981,2982,2983,2984,2985,2986,2987,2988,2989,2990,2991,2992,
2993,2994,2995,2996,2997,2998,2999,3000,3001,3002,3003,3004,3005,3006,3007,3008,
3009,3010,3011,3012,1553,3013,3014,3015,3016,3017,1554,3018,1332,3019,3020,3021,
3022,3023,3024,3025,3026,3027,3028,3029,3030,3031,3032,3033,3034,3035,3036,3037,
3038,3039,3040,3041,3042,3043,3044,3045,3046,3047,3048,3049,3050,1555,3051,3052,
3053,1556,1557,3054,3055,3056,3057,3058,3059,3060,3061,3062,3063,3064,3065,3066,
3067,1558,3068,3069,3070,3071,3072,3073,3074,3075,3076,1559,3077,3078,3079,3080,
3081,3082,3083,1253,3084,3085,3086,3087,3088,3089,3090,3091,3092,3093,3094,3095,
3096,3097,3098,3099,3100,3101,3102,3103,3104,3105,3106,3107,3108,1152,3109,3110,
3111,3112,3113,1560,3114,3115,3116,3117,1111,3118,3119,3120,3121,3122,3123,3124,
3125,3126,3127,3128,3129,3130,3131,3132,3133,3134,3135,3136,3137,3138,3139,3140,
3141,3142,3143,3144,3145,3146,3147,3148,3149,3150,3151,3152,3153,3154,3155,3156,
3157,3158,3159,3160,3161,3162,3163,3164,3165,3166,3167,3168,3169,3170,3171,3172,
3173,3174,3175,3176,1333,3177,3178,3179,3180,3181,3182,3183,3184,3185,3186,3187,
3188,3189,1561,3190,3191,1334,3192,3193,3194,3195,3196,3197,3198,3199,3200,3201,
3202,3203,3204,3205,3206,3207,3208,3209,3210,3211,3212,3213,3214,3215,3216,3217,
3218,3219,3220,3221,3222,3223,3224,3225,3226,3227,3228,3229,3230,3231,3232,3233,
3234,1562,3235,3236,3237,3238,3239,3240,3241,3242,3243,3244,3245,3246,3247,3248,
3249,3250,3251,3252,3253,3254,3255,3256,3257,3258,3259,3260,3261,3262,3263,3264,
3265,3266,3267,3268,3269,3270,3271,3272,3273,3274,3275,3276,3277,1563,3278,3279,
3280,3281,3282,3283,3284,3285,3286,3287,3288,3289,3290,3291,3292,3293,3294,3295,
3296,3297,3298,3299,3300,3301,3302,3303,3304,3305,3306,3307,3308,3309,3310,3311,
3312,3313,3314,3315,3316,3317,3318,3319,3320,3321,3322,3323,3324,3325,3326,3327,
3328,3329,3330,3331,3332,3333,3334,3335,3336,3337,3338,3339,3340,3341,3342,3343,
3344,3345,3346,3347,3348,3349,3350,3351,3352,3353,3354,3355,3356,3357,3358,3359,
3360,3361,3362,3363,3364,1335,3365,3366,3367,3368,3369,3370,3371,3372,3373,3374,
3375,3376,3377,3378,3379,3380,3381,3382,3383,3384,3385,3386,3387,1336,3388,3389,
3390,3391,3392,3393,3394,3395,3396,3397,3398,3399,3400,3401,3402,3403,3404,3405,
3406,3407,3408,3409,3410,3411,3412,3413,3414,1337,3415,3416,3417,3418,3419,1338,
3420,3421,3422,1564,1565,3423,3424,3425,3426,3427,3428,3429,3430,3431,1254,3432,
3433,3434,1339,3435,3436,3437,3438,3439,1566,3440,3441,3442,3443,3444,3445,3446,
3447,3448,3449,3450,3451,3452,3453,3454,1255,3455,3456,3457,3458,3459,1567,1191,
3460,1568,1569,3461,3462,3463,1570,3464,3465,3466,3467,3468,1571,3469,3470,3471,
3472,3473,1572,3474,3475,3476,3477,3478,3479,3480,3481,3482,3483,3484,3485,3486,
1340,3487,3488,3489,3490,3491,3492,1021,3493,3494,3495,3496,3497,3498,1573,3499,
1341,3500,3501,3502,3503,3504,3505,3506,3507,3508,3509,3510,3511,1342,3512,3513,
3514,3515,3516,1574,1343,3517,3518,3519,1575,3520,1576,3521,3522,3523,3524,3525,
3526,3527,3528,3529,3530,3531,3532,3533,3534,3535,3536,3537,3538,3539,3540,3541,
3542,3543,3544,3545,3546,3547,3548,3549,3550,3551,3552,3553,3554,3555,3556,3557,
3558,3559,3560,3561,3562,3563,3564,3565,3566,3567,3568,3569,3570,3571,3572,3573,
3574,3575,3576,3577,3578,3579,3580,1577,3581,3582,1578,3583,3584,3585,3586,3587,
3588,3589,3590,3591,3592,3593,3594,3595,3596,3597,3598,3599,3600,3601,3602,3603,
3604,1579,3605,3606,3607,3608,3609,3610,3611,3612,3613,3614,3615,3616,3617,3618,
3619,3620,3621,3622,3623,3624,3625,3626,3627,3628,3629,1580,3630,3631,1581,3632,
3633,3634,3635,3636,3637,3638,3639,3640,3641,3642,3643,3644,3645,3646,3647,3648,
3649,3650,3651,3652,3653,3654,3655,3656,1582,3657,3658,3659,3660,3661,3662,3663,
3664,3665,3666,3667,3668,3669,3670,3671,3672,3673,3674,3675,3676,3677,3678,3679,
3680,3681,3682,3683,3684,3685,3686,3687,3688,3689,3690,3691,3692,3693,3694,3695,
3696,3697,3698,3699,3700,1192,3701,3702,3703,3704,1256,3705,3706,3707,3708,1583,
1257,3709,3710,3711,3712,3713,3714,3715,3716,1584,3717,3718,3719,3720,3721,3722,
3723,3724,3725,3726,3727,3728,3729,3730,3731,3732,3733,3734,3735,3736,3737,3738,
3739,3740,3741,3742,3743,3744,3745,1344,3746,3747,3748,3749,3750,3751,3752,3753,
3754,3755,3756,1585,3757,3758,3759,3760,3761,3762,3763,3764,3765,3766,1586,3767,
3768,3769,3770,3771,3772,3773,3774,3775,3776,3777,3778,1345,3779,3780,3781,3782,
3783,3784,3785,3786,3787,3788,3789,3790,3791,3792,3793,3794,3795,1346,1587,3796,
3797,1588,3798,3799,3800,3801,3802,3803,3804,3805,3806,1347,3807,3808,3809,3810,
3811,1589,3812,3813,3814,3815,3816,3817,3818,3819,3820,3821,1590,3822,3823,1591,
1348,3824,3825,3826,3827,3828,3829,3830,1592,3831,3832,1593,3833,3834,3835,3836,
3837,3838,3839,3840,3841,3842,3843,3844,1349,3845,3846,3847,3848,3849,3850,3851,
3852,3853,3854,3855,3856,3857,3858,1594,3859,3860,3861,3862,3863,3864,3865,3866,
3867,3868,3869,1595,3870,3871,3872,3873,1596,3874,3875,3876,3877,3878,3879,3880,
3881,3882,3883,3884,3885,3886,1597,3887,3888,3889,3890,3891,3892,3893,3894,3895,
1598,3896,3897,3898,1599,1600,3899,1350,3900,1351,3901,3902,1352,3903,3904,3905,
3906,3907,3908,3909,3910,3911,3912,3913,3914,3915,3916,3917,3918,3919,3920,3921,
3922,3923,3924,1258,3925,3926,3927,3928,3929,3930,3931,1193,3932,1601,3933,3934,
3935,3936,3937,3938,3939,3940,3941,3942,3943,1602,3944,3945,3946,3947,3948,1603,
3949,3950,3951,3952,3953,3954,3955,3956,3957,3958,3959,3960,3961,3962,3963,3964,
3965,1604,3966,3967,3968,3969,3970,3971,3972,3973,3974,3975,3976,3977,1353,3978,
3979,3980,3981,3982,3983,3984,3985,3986,3987,3988,3989,3990,3991,1354,3992,3993,
3994,3995,3996,3997,3998,3999,4000,4001,4002,4003,4004,4005,4006,4007,4008,4009,
4010,4011,4012,4013,4014,4015,4016,4017,4018,4019,4020,4021,4022,4023,1355,4024,
4025,4026,4027,4028,4029,4030,4031,4032,4033,4034,4035,4036,4037,4038,4039,4040,
1605,4041,4042,4043,4044,4045,4046,4047,4048,4049,4050,4051,4052,4053,4054,4055,
4056,4057,4058,4059,4060,1606,4061,4062,4063,4064,1607,4065,4066,4067,4068,4069,
4070,4071,4072,4073,4074,4075,4076,1194,4077,4078,1608,4079,4080,4081,4082,4083,
4084,4085,4086,4087,1609,4088,4089,4090,4091,4092,4093,4094,4095,4096,4097,4098,
4099,4100,4101,4102,4103,4104,4105,4106,4107,4108,1259,4109,4110,4111,4112,4113,
4114,4115,4116,4117,4118,4119,4120,4121,4122,4123,4124,1195,4125,4126,4127,1610,
4128,4129,4130,4131,4132,4133,4134,4135,4136,4137,1356,4138,4139,4140,4141,4142,
4143,4144,1611,4145,4146,4147,4148,4149,4150,4151,4152,4153,4154,4155,4156,4157,
4158,4159,4160,4161,4162,4163,4164,4165,4166,4167,4168,4169,4170,4171,4172,4173,
4174,4175,4176,4177,4178,4179,4180,4181,4182,4183,4184,4185,4186,4187,4188,4189,
4190,4191,4192,4193,4194,4195,4196,4197,4198,4199,4200,4201,4202,4203,4204,4205,
4206,4207,4208,4209,4210,4211,4212,4213,4214,4215,4216,4217,4218,4219,1612,4220,
4221,4222,4223,4224,4225,4226,4227,1357,4228,1613,4229,4230,4231,4232,4233,4234,
4235,4236,4237,4238,4239,4240,4241,4242,4243,1614,4244,4245,4246,4247,4248,4249,
4250,4251,4252,4253,4254,4255,4256,4257,4258,4259,4260,4261,4262,4263,4264,4265,
4266,4267,4268,4269,4270,1196,1358,4271,4272,4273,4274,4275,4276,4277,4278,4279,
4280,4281,4282,4283,4284,4285,4286,4287,1615,4288,4289,4290,4291,4292,4293,4294,
4295,4296,4297,4298,4299,4300,4301,4302,4303,4304,4305,4306,4307,4308,4309,4310,
4311,4312,4313,4314,4315,4316,4317,4318,4319,4320,4321,4322,4323,4324,4325,4326,
4327,4328,4329,4330,4331,4332,4333,4334,1616,4335,4336,4337,4338,4339,4340,4341,
4342,4343,4344,4345,4346,4347,4348,4349,4350,4351,4352,4353,4354,4355,4356,4357,
4358,4359,4360,1617,4361,4362,4363,4364,4365,1618,4366,4367,4368,4369,4370,4371,
4372,4373,4374,4375,4376,4377,4378,4379,4380,4381,4382,4383,4384,4385,4386,4387,
4388,4389,4390,4391,4392,4393,4394,4395,4396,4397,4398,4399,4400,4401,4402,4403,
4404,4405,4406,4407,4408,4409,4410,4411,4412,4413,4414,4415,4416,1619,4417,4418,
4419,4420,4421,4422,4423,4424,4425,1112,4426,4427,4428,4429,4430,1620,4431,4432,
4433,4434,4435,4436,4437,4438,4439,4440,4441,4442,1260,1261,4443,4444,4445,4446,
4447,4448,4449,4450,4451,4452,4453,4454,4455,1359,4456,4457,4458,4459,4460,4461,
4462,4463,4464,4465,1621,4466,4467,4468,4469,4470,4471,4472,4473,4474,4475,4476,
4477,4478,4479,4480,4481,4482,4483,4484,4485,4486,4487,4488,4489,1055,4490,4491,
4492,4493,4494,4495,4496,4497,4498,4499,4500,4501,4502,4503,4504,4505,4506,4507,
4508,4509,4510,4511,4512,4513,4514,4515,4516,4517,4518,1622,4519,4520,4521,1623,
4522,4523,4524,4525,4526,4527,4528,4529,4530,4531,4532,4533,4534,4535,1360,4536,
4537,4538,4539,4540,4541,4542,4543, 975,4544,4545,4546,4547,4548,4549,4550,4551,
4552,4553,4554,4555,4556,4557,4558,4559,4560,4561,4562,4563,4564,4565,4566,4567,
4568,4569,4570,4571,1624,4572,4573,4574,4575,4576,1625,4577,4578,4579,4580,4581,
4582,4583,4584,1626,4585,4586,4587,4588,4589,4590,4591,4592,4593,4594,4595,1627,
4596,4597,4598,4599,4600,4601,4602,4603,4604,4605,4606,4607,4608,4609,4610,4611,
4612,4613,4614,4615,1628,4616,4617,4618,4619,4620,4621,4622,4623,4624,4625,4626,
4627,4628,4629,4630,4631,4632,4633,4634,4635,4636,4637,4638,4639,4640,4641,4642,
4643,4644,4645,4646,4647,4648,4649,1361,4650,4651,4652,4653,4654,4655,4656,4657,
4658,4659,4660,4661,1362,4662,4663,4664,4665,4666,4667,4668,4669,4670,4671,4672,
4673,4674,4675,4676,4677,4678,4679,4680,4681,4682,1629,4683,4684,4685,4686,4687,
1630,4688,4689,4690,4691,1153,4692,4693,4694,1113,4695,4696,4697,4698,4699,4700,
4701,4702,4703,4704,4705,4706,4707,4708,4709,4710,4711,1197,4712,4713,4714,4715,
4716,4717,4718,4719,4720,4721,4722,4723,4724,4725,4726,4727,4728,4729,4730,4731,
4732,4733,4734,4735,1631,4736,1632,4737,4738,4739,4740,4741,4742,4743,4744,1633,
4745,4746,4747,4748,4749,1262,4750,4751,4752,4753,4754,1363,4755,4756,4757,4758,
4759,4760,4761,4762,4763,4764,4765,4766,4767,4768,1634,4769,4770,4771,4772,4773,
4774,4775,4776,4777,4778,1635,4779,4780,4781,4782,4783,4784,4785,4786,4787,4788,
4789,1636,4790,4791,4792,4793,4794,4795,4796,4797,4798,4799,4800,4801,4802,4803,
4804,4805,4806,1637,4807,4808,4809,1638,4810,4811,4812,4813,4814,4815,4816,4817,
4818,1639,4819,4820,4821,4822,4823,4824,4825,4826,4827,4828,4829,4830,4831,4832,
4833,1077,4834,4835,4836,4837,4838,4839,4840,4841,4842,4843,4844,4845,4846,4847,
4848,4849,4850,4851,4852,4853,4854,4855,4856,4857,4858,4859,4860,4861,4862,4863,
4864,4865,4866,4867,4868,4869,4870,4871,4872,4873,4874,4875,4876,4877,4878,4879,
4880,4881,4882,4883,1640,4884,4885,1641,4886,4887,4888,4889,4890,4891,4892,4893,
4894,4895,4896,4897,4898,4899,4900,4901,4902,4903,4904,4905,4906,4907,4908,4909,
4910,4911,1642,4912,4913,4914,1364,4915,4916,4917,4918,4919,4920,4921,4922,4923,
4924,4925,4926,4927,4928,4929,4930,4931,1643,4932,4933,4934,4935,4936,4937,4938,
4939,4940,4941,4942,4943,4944,4945,4946,4947,4948,4949,4950,4951,4952,4953,4954,
4955,4956,4957,4958,4959,4960,4961,4962,4963,4964,4965,4966,4967,4968,4969,4970,
4971,4972,4973,4974,4975,4976,4977,4978,4979,4980,1644,4981,4982,4983,4984,1645,
4985,4986,1646,4987,4988,4989,4990,4991,4992,4993,4994,4995,4996,4997,4998,4999,
5000,5001,5002,5003,5004,5005,1647,5006,1648,5007,5008,5009,5010,5011,5012,1078,
5013,5014,5015,5016,5017,5018,5019,5020,5021,5022,5023,5024,5025,5026,5027,5028,
1365,5029,5030,5031,5032,5033,5034,5035,5036,5037,5038,5039,1649,5040,5041,5042,
5043,5044,5045,1366,5046,5047,5048,5049,5050,5051,5052,5053,5054,5055,1650,5056,
5057,5058,5059,5060,5061,5062,5063,5064,5065,5066,5067,5068,5069,5070,5071,5072,
5073,5074,5075,5076,5077,1651,5078,5079,5080,5081,5082,5083,5084,5085,5086,5087,
5088,5089,5090,5091,5092,5093,5094,5095,5096,5097,5098,5099,5100,5101,5102,5103,
5104,5105,5106,5107,5108,5109,5110,1652,5111,5112,5113,5114,5115,5116,5117,5118,
1367,5119,5120,5121,5122,5123,5124,5125,5126,5127,5128,5129,1653,5130,5131,5132,
5133,5134,5135,5136,5137,5138,5139,5140,5141,5142,5143,5144,5145,5146,5147,5148,
5149,1368,5150,1654,5151,1369,5152,5153,5154,5155,5156,5157,5158,5159,5160,5161,
5162,5163,5164,5165,5166,5167,5168,5169,5170,5171,5172,5173,5174,5175,5176,5177,
5178,1370,5179,5180,5181,5182,5183,5184,5185,5186,5187,5188,5189,5190,5191,5192,
5193,5194,5195,5196,5197,5198,1655,5199,5200,5201,5202,1656,5203,5204,5205,5206,
1371,5207,1372,5208,5209,5210,5211,1373,5212,5213,1374,5214,5215,5216,5217,5218,
5219,5220,5221,5222,5223,5224,5225,5226,5227,5228,5229,5230,5231,5232,5233,5234,
5235,5236,5237,5238,5239,5240,5241,5242,5243,5244,5245,5246,5247,1657,5248,5249,
5250,5251,1658,1263,5252,5253,5254,5255,5256,1375,5257,5258,5259,5260,5261,5262,
5263,5264,5265,5266,5267,5268,5269,5270,5271,5272,5273,5274,5275,5276,5277,5278,
5279,5280,5281,5282,5283,1659,5284,5285,5286,5287,5288,5289,5290,5291,5292,5293,
5294,5295,5296,5297,5298,5299,5300,1660,5301,5302,5303,5304,5305,5306,5307,5308,
5309,5310,5311,5312,5313,5314,5315,5316,5317,5318,5319,5320,5321,1376,5322,5323,
5324,5325,5326,5327,5328,5329,5330,5331,5332,5333,1198,5334,5335,5336,5337,5338,
5339,5340,5341,5342,5343,1661,5344,5345,5346,5347,5348,5349,5350,5351,5352,5353,
5354,5355,5356,5357,5358,5359,5360,5361,5362,5363,5364,5365,5366,5367,5368,5369,
5370,5371,5372,5373,5374,5375,5376,5377,5378,5379,5380,5381,5382,5383,5384,5385,
5386,5387,5388,5389,5390,5391,5392,5393,5394,5395,5396,5397,5398,1264,5399,5400,
5401,5402,5403,5404,5405,5406,5407,5408,5409,5410,5411,5412,1662,5413,5414,5415,
5416,1663,5417,5418,5419,5420,5421,5422,5423,5424,5425,5426,5427,5428,5429,5430,
5431,5432,5433,5434,5435,5436,5437,5438,1664,5439,5440,5441,5442,5443,5444,5445,
5446,5447,5448,5449,5450,5451,5452,5453,5454,5455,5456,5457,5458,5459,5460,5461,
5462,5463,5464,5465,5466,5467,5468,5469,5470,5471,5472,5473,5474,5475,5476,5477,
5478,1154,5479,5480,5481,5482,5483,5484,5485,1665,5486,5487,5488,5489,5490,5491,
5492,5493,5494,5495,5496,5497,5498,5499,5500,5501,5502,5503,5504,5505,5506,5507,
5508,5509,5510,5511,5512,5513,5514,5515,5516,5517,5518,5519,5520,5521,5522,5523,
5524,5525,5526,5527,5528,5529,5530,5531,5532,5533,5534,5535,5536,5537,5538,5539,
5540,5541,5542,5543,5544,5545,5546,5547,5548,1377,5549,5550,5551,5552,5553,5554,
5555,5556,5557,5558,5559,5560,5561,5562,5563,5564,5565,5566,5567,5568,5569,5570,
1114,5571,5572,5573,5574,5575,5576,5577,5578,5579,5580,5581,5582,5583,5584,5585,
5586,5587,5588,5589,5590,5591,5592,1378,5593,5594,5595,5596,5597,5598,5599,5600,
5601,5602,5603,5604,5605,5606,5607,5608,5609,5610,5611,5612,5613,5614,1379,5615,
5616,5617,5618,5619,5620,5621,5622,5623,5624,5625,5626,5627,5628,5629,5630,5631,
5632,5633,5634,1380,5635,5636,5637,5638,5639,5640,5641,5642,5643,5644,5645,5646,
5647,5648,5649,1381,1056,5650,5651,5652,5653,5654,5655,5656,5657,5658,5659,5660,
1666,5661,5662,5663,5664,5665,5666,5667,5668,1667,5669,1668,5670,5671,5672,5673,
5674,5675,5676,5677,5678,1155,5679,5680,5681,5682,5683,5684,5685,5686,5687,5688,
5689,5690,5691,5692,5693,5694,5695,5696,5697,5698,1669,5699,5700,5701,5702,5703,
5704,5705,1670,5706,5707,5708,5709,5710,1671,5711,5712,5713,5714,1382,5715,5716,
5717,5718,5719,5720,5721,5722,5723,5724,5725,1672,5726,5727,1673,1674,5728,5729,
5730,5731,5732,5733,5734,5735,5736,1675,5737,5738,5739,5740,5741,5742,5743,5744,
1676,5745,5746,5747,5748,5749,5750,5751,1383,5752,5753,5754,5755,5756,5757,5758,
5759,5760,5761,5762,5763,5764,5765,5766,5767,5768,1677,5769,5770,5771,5772,5773,
1678,5774,5775,5776, 998,5777,5778,5779,5780,5781,5782,5783,5784,5785,1384,5786,
5787,5788,5789,5790,5791,5792,5793,5794,5795,5796,5797,5798,5799,5800,1679,5801,
5802,5803,1115,1116,5804,5805,5806,5807,5808,5809,5810,5811,5812,5813,5814,5815,
5816,5817,5818,5819,5820,5821,5822,5823,5824,5825,5826,5827,5828,5829,5830,5831,
5832,5833,5834,5835,5836,5837,5838,5839,5840,5841,5842,5843,5844,5845,5846,5847,
5848,5849,5850,5851,5852,5853,5854,5855,1680,5856,5857,5858,5859,5860,5861,5862,
5863,5864,1681,5865,5866,5867,1682,5868,5869,5870,5871,5872,5873,5874,5875,5876,
5877,5878,5879,1683,5880,1684,5881,5882,5883,5884,1685,5885,5886,5887,5888,5889,
5890,5891,5892,5893,5894,5895,5896,5897,5898,5899,5900,5901,5902,5903,5904,5905,
5906,5907,1686,5908,5909,5910,5911,5912,5913,5914,5915,5916,5917,5918,5919,5920,
5921,5922,5923,5924,5925,5926,5927,5928,5929,5930,5931,5932,5933,5934,5935,1687,
5936,5937,5938,5939,5940,5941,5942,5943,5944,5945,5946,5947,5948,5949,5950,5951,
5952,1688,1689,5953,1199,5954,5955,5956,5957,5958,5959,5960,5961,1690,5962,5963,
5964,5965,5966,5967,5968,5969,5970,5971,5972,5973,5974,5975,5976,5977,5978,5979,
5980,5981,1385,5982,1386,5983,5984,5985,5986,5987,5988,5989,5990,5991,5992,5993,
5994,5995,5996,5997,5998,5999,6000,6001,6002,6003,6004,6005,6006,6007,6008,6009,
6010,6011,6012,6013,6014,6015,6016,6017,6018,6019,6020,6021,6022,6023,6024,6025,
6026,6027,1265,6028,6029,1691,6030,6031,6032,6033,6034,6035,6036,6037,6038,6039,
6040,6041,6042,6043,6044,6045,6046,6047,6048,6049,6050,6051,6052,6053,6054,6055,
6056,6057,6058,6059,6060,6061,6062,6063,6064,6065,6066,6067,6068,6069,6070,6071,
6072,6073,6074,6075,6076,6077,6078,6079,6080,6081,6082,6083,6084,1692,6085,6086,
6087,6088,6089,6090,6091,6092,6093,6094,6095,6096,6097,6098,6099,6100,6101,6102,
6103,6104,6105,6106,6107,6108,6109,6110,6111,6112,6113,6114,6115,6116,6117,6118,
6119,6120,6121,6122,6123,6124,6125,6126,6127,6128,6129,6130,6131,1693,6132,6133,
6134,6135,6136,1694,6137,6138,6139,6140,6141,1695,6142,6143,6144,6145,6146,6147,
6148,6149,6150,6151,6152,6153,6154,6155,6156,6157,6158,6159,6160,6161,6162,6163,
6164,6165,6166,6167,6168,6169,6170,6171,6172,6173,6174,6175,6176,6177,6178,6179,
6180,6181,6182,6183,6184,6185,1696,6186,6187,6188,6189,6190,6191,6192,6193,6194,
6195,6196,6197,6198,6199,6200,6201,6202,6203,6204,6205,6206,6207,6208,6209,6210,
6211,6212,6213,6214,6215,6216,6217,6218,6219,1697,6220,6221,6222,6223,6224,6225,
6226,6227,6228,6229,6230,6231,6232,6233,6234,6235,6236,6237,6238,6239,6240,6241,
6242,6243,6244,6245,6246,6247,6248,6249,6250,6251,6252,6253,1698,6254,6255,6256,
6257,6258,6259,6260,6261,6262,6263,1200,6264,6265,6266,6267,6268,6269,6270,6271, #1024
6272,6273,6274,6275,6276,6277,6278,6279,6280,6281,6282,6283,6284,6285,6286,6287,
6288,6289,6290,6291,6292,6293,6294,6295,6296,6297,6298,6299,6300,6301,6302,1699,
6303,6304,1700,6305,6306,6307,6308,6309,6310,6311,6312,6313,6314,6315,6316,6317,
6318,6319,6320,6321,6322,6323,6324,6325,6326,6327,6328,6329,6330,6331,6332,6333,
6334,6335,6336,6337,6338,6339,1701,6340,6341,6342,6343,6344,1387,6345,6346,6347,
6348,6349,6350,6351,6352,6353,6354,6355,6356,6357,6358,6359,6360,6361,6362,6363,
6364,6365,6366,6367,6368,6369,6370,6371,6372,6373,6374,6375,6376,6377,6378,6379,
6380,6381,6382,6383,6384,6385,6386,6387,6388,6389,6390,6391,6392,6393,6394,6395,
6396,6397,6398,6399,6400,6401,6402,6403,6404,6405,6406,6407,6408,6409,6410,6411,
6412,6413,1702,6414,6415,6416,6417,6418,6419,6420,6421,6422,1703,6423,6424,6425,
6426,6427,6428,6429,6430,6431,6432,6433,6434,6435,6436,6437,6438,1704,6439,6440,
6441,6442,6443,6444,6445,6446,6447,6448,6449,6450,6451,6452,6453,6454,6455,6456,
6457,6458,6459,6460,6461,6462,6463,6464,6465,6466,6467,6468,6469,6470,6471,6472,
6473,6474,6475,6476,6477,6478,6479,6480,6481,6482,6483,6484,6485,6486,6487,6488,
6489,6490,6491,6492,6493,6494,6495,6496,6497,6498,6499,6500,6501,6502,6503,1266,
6504,6505,6506,6507,6508,6509,6510,6511,6512,6513,6514,6515,6516,6517,6518,6519,
6520,6521,6522,6523,6524,6525,6526,6527,6528,6529,6530,6531,6532,6533,6534,6535,
6536,6537,6538,6539,6540,6541,6542,6543,6544,6545,6546,6547,6548,6549,6550,6551,
1705,1706,6552,6553,6554,6555,6556,6557,6558,6559,6560,6561,6562,6563,6564,6565,
6566,6567,6568,6569,6570,6571,6572,6573,6574,6575,6576,6577,6578,6579,6580,6581,
6582,6583,6584,6585,6586,6587,6588,6589,6590,6591,6592,6593,6594,6595,6596,6597,
6598,6599,6600,6601,6602,6603,6604,6605,6606,6607,6608,6609,6610,6611,6612,6613,
6614,6615,6616,6617,6618,6619,6620,6621,6622,6623,6624,6625,6626,6627,6628,6629,
6630,6631,6632,6633,6634,6635,6636,6637,1388,6638,6639,6640,6641,6642,6643,6644,
1707,6645,6646,6647,6648,6649,6650,6651,6652,6653,6654,6655,6656,6657,6658,6659,
6660,6661,6662,6663,1708,6664,6665,6666,6667,6668,6669,6670,6671,6672,6673,6674,
1201,6675,6676,6677,6678,6679,6680,6681,6682,6683,6684,6685,6686,6687,6688,6689,
6690,6691,6692,6693,6694,6695,6696,6697,6698,6699,6700,6701,6702,6703,6704,6705,
6706,6707,6708,6709,6710,6711,6712,6713,6714,6715,6716,6717,6718,6719,6720,6721,
6722,6723,6724,6725,1389,6726,6727,6728,6729,6730,6731,6732,6733,6734,6735,6736,
1390,1709,6737,6738,6739,6740,6741,6742,1710,6743,6744,6745,6746,1391,6747,6748,
6749,6750,6751,6752,6753,6754,6755,6756,6757,1392,6758,6759,6760,6761,6762,6763,
6764,6765,6766,6767,6768,6769,6770,6771,6772,6773,6774,6775,6776,6777,6778,6779,
6780,1202,6781,6782,6783,6784,6785,6786,6787,6788,6789,6790,6791,6792,6793,6794,
6795,6796,6797,6798,6799,6800,6801,6802,6803,6804,6805,6806,6807,6808,6809,1711,
6810,6811,6812,6813,6814,6815,6816,6817,6818,6819,6820,6821,6822,6823,6824,6825,
6826,6827,6828,6829,6830,6831,6832,6833,6834,6835,6836,1393,6837,6838,6839,6840,
6841,6842,6843,6844,6845,6846,6847,6848,6849,6850,6851,6852,6853,6854,6855,6856,
6857,6858,6859,6860,6861,6862,6863,6864,6865,6866,6867,6868,6869,6870,6871,6872,
6873,6874,6875,6876,6877,6878,6879,6880,6881,6882,6883,6884,6885,6886,6887,6888,
6889,6890,6891,6892,6893,6894,6895,6896,6897,6898,6899,6900,6901,6902,1712,6903,
6904,6905,6906,6907,6908,6909,6910,1713,6911,6912,6913,6914,6915,6916,6917,6918,
6919,6920,6921,6922,6923,6924,6925,6926,6927,6928,6929,6930,6931,6932,6933,6934,
6935,6936,6937,6938,6939,6940,6941,6942,6943,6944,6945,6946,6947,6948,6949,6950,
6951,6952,6953,6954,6955,6956,6957,6958,6959,6960,6961,6962,6963,6964,6965,6966,
6967,6968,6969,6970,6971,6972,6973,6974,1714,6975,6976,6977,6978,6979,6980,6981,
6982,6983,6984,6985,6986,6987,6988,1394,6989,6990,6991,6992,6993,6994,6995,6996,
6997,6998,6999,7000,1715,7001,7002,7003,7004,7005,7006,7007,7008,7009,7010,7011,
7012,7013,7014,7015,7016,7017,7018,7019,7020,7021,7022,7023,7024,7025,7026,7027,
7028,1716,7029,7030,7031,7032,7033,7034,7035,7036,7037,7038,7039,7040,7041,7042,
7043,7044,7045,7046,7047,7048,7049,7050,7051,7052,7053,7054,7055,7056,7057,7058,
7059,7060,7061,7062,7063,7064,7065,7066,7067,7068,7069,7070,7071,7072,7073,7074,
7075,7076,7077,7078,7079,7080,7081,7082,7083,7084,7085,7086,7087,7088,7089,7090,
7091,7092,7093,7094,7095,7096,7097,7098,7099,7100,7101,7102,7103,7104,7105,7106,
7107,7108,7109,7110,7111,7112,7113,7114,7115,7116,7117,7118,7119,7120,7121,7122,
7123,7124,7125,7126,7127,7128,7129,7130,7131,7132,7133,7134,7135,7136,7137,7138,
7139,7140,7141,7142,7143,7144,7145,7146,7147,7148,7149,7150,7151,7152,7153,7154,
7155,7156,7157,7158,7159,7160,7161,7162,7163,7164,7165,7166,7167,7168,7169,7170,
7171,7172,7173,7174,7175,7176,7177,7178,7179,7180,7181,7182,7183,7184,7185,7186,
7187,7188,7189,7190,7191,7192,7193,7194,7195,7196,7197,7198,7199,7200,7201,7202,
7203,7204,7205,7206,7207,1395,7208,7209,7210,7211,7212,7213,1717,7214,7215,7216,
7217,7218,7219,7220,7221,7222,7223,7224,7225,7226,7227,7228,7229,7230,7231,7232,
7233,7234,7235,7236,7237,7238,7239,7240,7241,7242,7243,7244,7245,7246,7247,7248,
7249,7250,7251,7252,7253,7254,7255,7256,7257,7258,7259,7260,7261,7262,7263,7264,
7265,7266,7267,7268,7269,7270,7271,7272,7273,7274,7275,7276,7277,7278,7279,7280,
7281,7282,7283,7284,7285,7286,7287,7288,7289,7290,7291,7292,7293,7294,7295,7296,
7297,7298,7299,7300,7301,7302,7303,7304,7305,7306,7307,7308,7309,7310,7311,7312,
7313,1718,7314,7315,7316,7317,7318,7319,7320,7321,7322,7323,7324,7325,7326,7327,
7328,7329,7330,7331,7332,7333,7334,7335,7336,7337,7338,7339,7340,7341,7342,7343,
7344,7345,7346,7347,7348,7349,7350,7351,7352,7353,7354,7355,7356,7357,7358,7359,
7360,7361,7362,7363,7364,7365,7366,7367,7368,7369,7370,7371,7372,7373,7374,7375,
7376,7377,7378,7379,7380,7381,7382,7383,7384,7385,7386,7387,7388,7389,7390,7391,
7392,7393,7394,7395,7396,7397,7398,7399,7400,7401,7402,7403,7404,7405,7406,7407,
7408,7409,7410,7411,7412,7413,7414,7415,7416,7417,7418,7419,7420,7421,7422,7423,
7424,7425,7426,7427,7428,7429,7430,7431,7432,7433,7434,7435,7436,7437,7438,7439,
7440,7441,7442,7443,7444,7445,7446,7447,7448,7449,7450,7451,7452,7453,7454,7455,
7456,7457,7458,7459,7460,7461,7462,7463,7464,7465,7466,7467,7468,7469,7470,7471,
7472,7473,7474,7475,7476,7477,7478,7479,7480,7481,7482,7483,7484,7485,7486,7487,
7488,7489,7490,7491,7492,7493,7494,7495,7496,7497,7498,7499,7500,7501,7502,7503,
7504,7505,7506,7507,7508,7509,7510,7511,7512,7513,7514,7515,7516,7517,7518,7519,
7520,7521,7522,7523,7524,7525,7526,7527,7528,7529,7530,7531,7532,7533,7534,7535,
7536,7537,7538,7539,7540,7541,7542,7543,7544,7545,7546,7547,7548,7549,7550,7551,
7552,7553,7554,7555,7556,7557,7558,7559,7560,7561,7562,7563,7564,7565,7566,7567,
7568,7569,7570,7571,7572,7573,7574,7575,7576,7577,7578,7579,7580,7581,7582,7583,
7584,7585,7586,7587,7588,7589,7590,7591,7592,7593,7594,7595,7596,7597,7598,7599,
7600,7601,7602,7603,7604,7605,7606,7607,7608,7609,7610,7611,7612,7613,7614,7615,
7616,7617,7618,7619,7620,7621,7622,7623,7624,7625,7626,7627,7628,7629,7630,7631,
7632,7633,7634,7635,7636,7637,7638,7639,7640,7641,7642,7643,7644,7645,7646,7647,
7648,7649,7650,7651,7652,7653,7654,7655,7656,7657,7658,7659,7660,7661,7662,7663,
7664,7665,7666,7667,7668,7669,7670,7671,7672,7673,7674,7675,7676,7677,7678,7679,
7680,7681,7682,7683,7684,7685,7686,7687,7688,7689,7690,7691,7692,7693,7694,7695,
7696,7697,7698,7699,7700,7701,7702,7703,7704,7705,7706,7707,7708,7709,7710,7711,
7712,7713,7714,7715,7716,7717,7718,7719,7720,7721,7722,7723,7724,7725,7726,7727,
7728,7729,7730,7731,7732,7733,7734,7735,7736,7737,7738,7739,7740,7741,7742,7743,
7744,7745,7746,7747,7748,7749,7750,7751,7752,7753,7754,7755,7756,7757,7758,7759,
7760,7761,7762,7763,7764,7765,7766,7767,7768,7769,7770,7771,7772,7773,7774,7775,
7776,7777,7778,7779,7780,7781,7782,7783,7784,7785,7786,7787,7788,7789,7790,7791,
7792,7793,7794,7795,7796,7797,7798,7799,7800,7801,7802,7803,7804,7805,7806,7807,
7808,7809,7810,7811,7812,7813,7814,7815,7816,7817,7818,7819,7820,7821,7822,7823,
7824,7825,7826,7827,7828,7829,7830,7831,7832,7833,7834,7835,7836,7837,7838,7839,
7840,7841,7842,7843,7844,7845,7846,7847,7848,7849,7850,7851,7852,7853,7854,7855,
7856,7857,7858,7859,7860,7861,7862,7863,7864,7865,7866,7867,7868,7869,7870,7871,
7872,7873,7874,7875,7876,7877,7878,7879,7880,7881,7882,7883,7884,7885,7886,7887,
7888,7889,7890,7891,7892,7893,7894,7895,7896,7897,7898,7899,7900,7901,7902,7903,
7904,7905,7906,7907,7908,7909,7910,7911,7912,7913,7914,7915,7916,7917,7918,7919,
7920,7921,7922,7923,7924,7925,7926,7927,7928,7929,7930,7931,7932,7933,7934,7935,
7936,7937,7938,7939,7940,7941,7942,7943,7944,7945,7946,7947,7948,7949,7950,7951,
7952,7953,7954,7955,7956,7957,7958,7959,7960,7961,7962,7963,7964,7965,7966,7967,
7968,7969,7970,7971,7972,7973,7974,7975,7976,7977,7978,7979,7980,7981,7982,7983,
7984,7985,7986,7987,7988,7989,7990,7991,7992,7993,7994,7995,7996,7997,7998,7999,
8000,8001,8002,8003,8004,8005,8006,8007,8008,8009,8010,8011,8012,8013,8014,8015,
8016,8017,8018,8019,8020,8021,8022,8023,8024,8025,8026,8027,8028,8029,8030,8031,
8032,8033,8034,8035,8036,8037,8038,8039,8040,8041,8042,8043,8044,8045,8046,8047,
8048,8049,8050,8051,8052,8053,8054,8055,8056,8057,8058,8059,8060,8061,8062,8063,
8064,8065,8066,8067,8068,8069,8070,8071,8072,8073,8074,8075,8076,8077,8078,8079,
8080,8081,8082,8083,8084,8085,8086,8087,8088,8089,8090,8091,8092,8093,8094,8095,
8096,8097,8098,8099,8100,8101,8102,8103,8104,8105,8106,8107,8108,8109,8110,8111,
8112,8113,8114,8115,8116,8117,8118,8119,8120,8121,8122,8123,8124,8125,8126,8127,
8128,8129,8130,8131,8132,8133,8134,8135,8136,8137,8138,8139,8140,8141,8142,8143,
8144,8145,8146,8147,8148,8149,8150,8151,8152,8153,8154,8155,8156,8157,8158,8159,
8160,8161,8162,8163,8164,8165,8166,8167,8168,8169,8170,8171,8172,8173,8174,8175,
8176,8177,8178,8179,8180,8181,8182,8183,8184,8185,8186,8187,8188,8189,8190,8191,
8192,8193,8194,8195,8196,8197,8198,8199,8200,8201,8202,8203,8204,8205,8206,8207,
8208,8209,8210,8211,8212,8213,8214,8215,8216,8217,8218,8219,8220,8221,8222,8223,
8224,8225,8226,8227,8228,8229,8230,8231,8232,8233,8234,8235,8236,8237,8238,8239,
8240,8241,8242,8243,8244,8245,8246,8247,8248,8249,8250,8251,8252,8253,8254,8255,
8256,8257,8258,8259,8260,8261,8262,8263,8264,8265,8266,8267,8268,8269,8270,8271,
8272,8273,8274,8275,8276,8277,8278,8279,8280,8281,8282,8283,8284,8285,8286,8287,
8288,8289,8290,8291,8292,8293,8294,8295,8296,8297,8298,8299,8300,8301,8302,8303,
8304,8305,8306,8307,8308,8309,8310,8311,8312,8313,8314,8315,8316,8317,8318,8319,
8320,8321,8322,8323,8324,8325,8326,8327,8328,8329,8330,8331,8332,8333,8334,8335,
8336,8337,8338,8339,8340,8341,8342,8343,8344,8345,8346,8347,8348,8349,8350,8351,
8352,8353,8354,8355,8356,8357,8358,8359,8360,8361,8362,8363,8364,8365,8366,8367,
8368,8369,8370,8371,8372,8373,8374,8375,8376,8377,8378,8379,8380,8381,8382,8383,
8384,8385,8386,8387,8388,8389,8390,8391,8392,8393,8394,8395,8396,8397,8398,8399,
8400,8401,8402,8403,8404,8405,8406,8407,8408,8409,8410,8411,8412,8413,8414,8415,
8416,8417,8418,8419,8420,8421,8422,8423,8424,8425,8426,8427,8428,8429,8430,8431,
8432,8433,8434,8435,8436,8437,8438,8439,8440,8441,8442,8443,8444,8445,8446,8447,
8448,8449,8450,8451,8452,8453,8454,8455,8456,8457,8458,8459,8460,8461,8462,8463,
8464,8465,8466,8467,8468,8469,8470,8471,8472,8473,8474,8475,8476,8477,8478,8479,
8480,8481,8482,8483,8484,8485,8486,8487,8488,8489,8490,8491,8492,8493,8494,8495,
8496,8497,8498,8499,8500,8501,8502,8503,8504,8505,8506,8507,8508,8509,8510,8511,
8512,8513,8514,8515,8516,8517,8518,8519,8520,8521,8522,8523,8524,8525,8526,8527,
8528,8529,8530,8531,8532,8533,8534,8535,8536,8537,8538,8539,8540,8541,8542,8543,
8544,8545,8546,8547,8548,8549,8550,8551,8552,8553,8554,8555,8556,8557,8558,8559,
8560,8561,8562,8563,8564,8565,8566,8567,8568,8569,8570,8571,8572,8573,8574,8575,
8576,8577,8578,8579,8580,8581,8582,8583,8584,8585,8586,8587,8588,8589,8590,8591,
8592,8593,8594,8595,8596,8597,8598,8599,8600,8601,8602,8603,8604,8605,8606,8607,
8608,8609,8610,8611,8612,8613,8614,8615,8616,8617,8618,8619,8620,8621,8622,8623,
8624,8625,8626,8627,8628,8629,8630,8631,8632,8633,8634,8635,8636,8637,8638,8639,
8640,8641,8642,8643,8644,8645,8646,8647,8648,8649,8650,8651,8652,8653,8654,8655,
8656,8657,8658,8659,8660,8661,8662,8663,8664,8665,8666,8667,8668,8669,8670,8671,
8672,8673,8674,8675,8676,8677,8678,8679,8680,8681,8682,8683,8684,8685,8686,8687,
8688,8689,8690,8691,8692,8693,8694,8695,8696,8697,8698,8699,8700,8701,8702,8703,
8704,8705,8706,8707,8708,8709,8710,8711,8712,8713,8714,8715,8716,8717,8718,8719,
8720,8721,8722,8723,8724,8725,8726,8727,8728,8729,8730,8731,8732,8733,8734,8735,
8736,8737,8738,8739,8740,8741)
# flake8: noqa
| mit |
cfei18/incubator-airflow | tests/contrib/sensors/test_sftp_sensor.py | 15 | 2755 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from mock import patch
from paramiko import SFTP_NO_SUCH_FILE, SFTP_FAILURE
from airflow.contrib.sensors.sftp_sensor import SFTPSensor
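# SFTP_NO_SUCH_FILE and SFTP_FAILURE are paramiko's numeric SFTP status codes;
# the tests below reuse them as the errno of the simulated IOError.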
class SFTPSensorTest(unittest.TestCase):
@patch('airflow.contrib.sensors.sftp_sensor.SFTPHook')
def test_file_present(self, sftp_hook_mock):
sftp_hook_mock.return_value.get_mod_time.return_value = '19700101000000'
sftp_sensor = SFTPSensor(
task_id='unit_test',
path='/path/to/file/1970-01-01.txt')
context = {
'ds': '1970-01-01'
}
output = sftp_sensor.poke(context)
sftp_hook_mock.return_value.get_mod_time.assert_called_with(
'/path/to/file/1970-01-01.txt')
self.assertTrue(output)
@patch('airflow.contrib.sensors.sftp_sensor.SFTPHook')
def test_file_absent(self, sftp_hook_mock):
sftp_hook_mock.return_value.get_mod_time.side_effect = IOError(
SFTP_NO_SUCH_FILE, 'File missing')
sftp_sensor = SFTPSensor(
task_id='unit_test',
path='/path/to/file/1970-01-01.txt')
context = {
'ds': '1970-01-01'
}
output = sftp_sensor.poke(context)
sftp_hook_mock.return_value.get_mod_time.assert_called_with(
'/path/to/file/1970-01-01.txt')
self.assertFalse(output)
@patch('airflow.contrib.sensors.sftp_sensor.SFTPHook')
def test_sftp_failure(self, sftp_hook_mock):
sftp_hook_mock.return_value.get_mod_time.side_effect = IOError(
SFTP_FAILURE, 'SFTP failure')
sftp_sensor = SFTPSensor(
task_id='unit_test',
path='/path/to/file/1970-01-01.txt')
context = {
'ds': '1970-01-01'
}
with self.assertRaises(IOError):
sftp_sensor.poke(context)
sftp_hook_mock.return_value.get_mod_time.assert_called_with(
'/path/to/file/1970-01-01.txt')
| apache-2.0 |
snua12/zlomekfs | tests/nose-tests/remoteZfs.py | 1 | 10216 | #! /bin/env python
"""
locally, we wrap every RemoteReference into RemoteObject
"""
from twisted.spread import pb
from twisted.internet import reactor
from twisted.python.failure import Failure
import sys
import os
import logging
import tempfile
from insecticide.snapshot import SnapshotDescription
from insecticide.timeoutPlugin import TimeExpired
from subprocess import Popen
from threading import Condition
from zfs import ZfsProxy
from nose.tools import make_decorator
log = logging.getLogger('nose.tests.clientServerTest')
def wrapException(func):
""" Decorator to wrap local exception to twisted Failure object.
:Return:
function test value
:Raise:
pb.Error wrapped exception (if raised from function
:Example usage:
@wrapException
def test_that_fails():
raise Exception()
"""
def newfunc(*arg, **kwargs):
ret = None
try:
ret = func(*arg, **kwargs)
except Exception, value:
(type, value, tb) = sys.exc_info()
raise pb.Error(str(type) + ':' + str(value))
return ret
newfunc = make_decorator(func)(newfunc)
return newfunc
class RemoteException(Exception):
pass
def raiseRemoteException(failure):
tb = failure.getTraceback()
#log.debug('tb is %s(%d) of type %s'%(str(tb),id(tb),type(tb)))
raise RemoteException, str(failure.type) + ':' \
+ failure.getErrorMessage() + ' on\n' \
+ str(failure.getTraceback()), failure.tb
LISTEN_PORT = 8007
CHUNK_SIZE = 4096
class RemoteFile(pb.Referenceable):
def __init__(self, fileName, mode):
self.fh=open(fileName, mode)
@wrapException
def remote_write(self, data):
return self.fh.write(data)
@wrapException
def remote_read(self, size = None):
if size:
return self.fh.read(size)
else:
return self.fh.read()
@wrapException
def remote_getSize(self):
pos = self.fh.tell()
self.fh.seek(0,os.SEEK_END)
size = self.fh.tell()
self.fh.seek(pos,os.SEEK_SET)
return size
@wrapException
def remote_close(self):
return self.fh.close()
@wrapException
def remote_seek(self, offset):
return self.fh.seek(offset, os.SEEK_SET)
@wrapException
def remote_tell(self):
return self.fh.tell()
@wrapException
def remote_getName(self):
return self.fh.name
@wrapException
def remote_delete(self):
os.unlink(self.fh.name)
@wrapException
def remote_flush(self):
self.fh.flush()
class RemoteZfs(pb.Referenceable):
def __init__(self, *arg, **kwargs):
self.zfs = ZfsProxy(*arg, **kwargs)
@wrapException
def remote_start(self):
return self.zfs.runZfs()
@wrapException
def remote_stop(self):
return self.zfs.stopZfs()
@wrapException
def remote_cleanup(self):
return self.zfs.cleanup()
@wrapException
def remote_running(self):
return self.zfs.running()
@wrapException
def remote_hasDied(self):
return self.zfs.hasDied()
@wrapException
def remote_signalAll(self, signal):
return self.zfs.signalAll(signal)
@wrapException
def remote_snapshot(self):
toDir = tempfile.mkdtemp(prefix="noseSnapshot")
snapshot = SnapshotDescription(toDir)
self.zfs.snapshot(snapshot)
(handle,fileName) = tempfile.mkstemp()
snapshot.pack(fileName)
snapshot.delete()
return RemoteFile(fileName, 'r')
@wrapException
def remote_getMountPoint(self):
return self.zfs.zfsRoot
class RemoteControl(pb.Root):
@wrapException
def remote_system(self, cmdLine):
print 'executing ' + str(cmdLine)
proc = Popen(args = cmdLine)
proc.wait()
return proc.returncode
@wrapException
def remote_open(self, fileName, mode):
return RemoteFile(fileName, mode)
@wrapException
def remote_makedirs(self, dirName):
try:
return os.makedirs(dirName)
except OSError:
pass
@wrapException
def remote_delete(self, fileName):
return os.unlink(fileName)
@wrapException
def remote_restart(self):
print 'executing ' + str(sys.argv)
reactor.stop()
os.execv(sys.argv[0],sys.argv)
@wrapException
def remote_getZfs(self, *arg, **kwargs):
return RemoteZfs(*arg, **kwargs)
class SimpleRemoteCall(object):
returncode = None
returned = False
def signal(self):
self.cond.acquire()
self.returned = True
self.cond.notify()
self.cond.release()
def errorHandler(self, error):
self.returncode = error
self.signal()
def successHandler(self, object):
self.returncode = object
self.signal()
    def __call__(self, *arg, **kwargs):
pass
def __init__(self, remoteReference, *arg, **kwargs):
self.cond = Condition()
deref = remoteReference.callRemote(*arg, **kwargs)
deref.addCallbacks(self.successHandler, self.errorHandler)
def wait(self, timeout = None):
self.cond.acquire()
if not self.returned:
self.cond.wait(timeout)
self.cond.release()
if not self.returned:
raise TimeExpired('timeout')
class GetRootObject(SimpleRemoteCall):
def __init__(self, reactor, host = 'localhost', port = LISTEN_PORT):
self.cond = Condition()
factory = pb.PBClientFactory()
reactor.connectTCP(host, port, factory)
deref = factory.getRootObject()
        deref.addCallbacks(self.successHandler, self.errorHandler)
class ReactorWrapper(object):
def __init__(self, reactor, reactorThread, timeout = 10):
self.reactor = reactor
self.reactorThread = reactorThread
self.timeout = timeout
def setTimeout(self, timeout):
self.timeout = timeout
def call(self, remoteReference, *arg, **kwargs):
call = SimpleRemoteCall(remoteReference, *arg, **kwargs)
self.reactor.callFromThread(call)
#call = cls(*arg, **kwargs)
call.wait(timeout = self.timeout)
if isinstance(call.returncode, Failure):
raiseRemoteException(call.returncode)
else:
return call.returncode
def getRemoteObject(self, remoteReference, *args, **kwargs):
ret = self.call(remoteReference, *args, **kwargs)
if not isinstance(ret, pb.RemoteReference):
raise TypeError('Invalid return value of type %s', str(type(ret)))
elif isinstance(ret, Failure):
raiseRemoteException(ret)
else:
return RemoteObjectWrapper(self, ret)
class RemoteObjectWrapper(object):
def __init__(self, reactorWrapper, remoteReference):
self.remoteReference = remoteReference
self.reactorWrapper = reactorWrapper
def call(self, *args, **kwargs):
return self.reactorWrapper.call(self.remoteReference, *args, **kwargs)
def getRemoteObject(self, *args, **kwargs):
return self.reactorWrapper.getRemoteObject(self.remoteReference, *args,
**kwargs)
class RemoteControlWrapper(RemoteObjectWrapper):
def __init__(self, reactorWrapper, host = 'localhost', port = LISTEN_PORT):
call = GetRootObject(reactorWrapper.reactor, host = host,
port = port)
reactorWrapper.reactor.callFromThread(call)
call.wait(reactorWrapper.timeout)
if isinstance(call.returncode, Failure):
raiseRemoteException(call.returncode)
elif not isinstance(call.returncode, pb.RemoteReference):
raise RemoteException("Can't get remoteControl " + str(call.returncode))
RemoteObjectWrapper.__init__(self, reactorWrapper, call.returncode)
def uploadFile(self, fromFile, toFile = None, remoteFile = None):
if not toFile:
toFile = fromFile
if not remoteFile:
dir = os.path.dirname(toFile)
if dir:
self.call('makedirs', dir)
remoteFile = self.getRemoteObject('open', toFile, 'wb+')
localFile = open(fromFile, 'r')
chunk = localFile.read(CHUNK_SIZE)
while chunk:
remoteFile.call('write', chunk)
chunk = localFile.read(CHUNK_SIZE)
localFile.close()
remoteFile.call('close')
def downloadFile(self, fromFile = None, toFile = None, remoteFile = None):
if not toFile and not fromFile:
raise AttributeError('either source or target must be specified')
if not toFile and fromFile:
toFile = fromFile
elif not fromFile and toFile:
fromFile = toFile
if not remoteFile:
dir = os.path.dirname(toFile)
if dir:
try:
os.makedirs(dir)
except OSError:
pass
remoteFile = self.getRemoteObject('open', fromFile, 'r')
localFile = open(toFile, 'wb+')
chunk = remoteFile.call('read', CHUNK_SIZE)
while chunk:
localFile.write(chunk)
chunk = remoteFile.call('read', CHUNK_SIZE)
localFile.close()
remoteFile.call('close')
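# Hedged usage sketch (client side; everything below is illustrative and not
# exercised by this module itself -- the host name is made up):
#
#   from threading import Thread
#   reactorThread = Thread(target=reactor.run,
#                          kwargs={'installSignalHandlers': False})
#   reactorThread.start()
#   wrapper = ReactorWrapper(reactor, reactorThread, timeout=30)
#   remoteControl = RemoteControlWrapper(wrapper, host='testhost')
#   remoteZfs = remoteControl.getRemoteObject('getZfs')
#   remoteZfs.call('start')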
if __name__ == '__main__':
(dirPath, scriptName) = os.path.split(sys.argv[0])
if dirPath:
os.chdir(dirPath)
sys.argv[0] = './' + scriptName
reactor.listenTCP(LISTEN_PORT, pb.PBServerFactory(RemoteControl()))
reactor.run()
| gpl-2.0 |
OpenSourcePolicyCenter/webapp-public | webapp/apps/dynamic/tests/utils.py | 2 | 1304 | START_YEAR = '2016'
def do_dynamic_sim(client, base_name, microsim_response, pe_reform,
start_year=START_YEAR):
# Link to dynamic simulation
idx = microsim_response.url[:-1].rfind('/')
model_num = microsim_response.url[idx + 1:-1]
dynamic_landing = '/dynamic/{1}/?start_year={2}'.format(
base_name, model_num, start_year)
response = client.get(dynamic_landing)
assert response.status_code == 200
# Go to behavioral input page
dynamic_behavior = '/dynamic/{0}/{1}/?start_year={2}'.format(
base_name, model_num, start_year)
response = client.get(dynamic_behavior)
assert response.status_code == 200
# Do the partial equilibrium job submission
response = client.post(dynamic_behavior, pe_reform)
assert response.status_code == 302
print(response)
# The results page will now succeed
next_response = client.get(response.url)
    # Poll until the results page loads: a 302 means the simulation job is
    # still running and redirects back until the results are ready.
    while True:
        if next_response.status_code == 200:
            break
        elif next_response.status_code == 302:
            next_response = client.get(next_response.url)
else:
raise RuntimeError("unable to load results page")
assert "results/" in response.url
return response
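# Hedged usage sketch (illustrative only -- `client` and `microsim_response`
# come from the caller's test fixtures, and the reform payload is made up):
#
#   pe_reform = {'BE_sub': ['0.4']}
#   response = do_dynamic_sim(client, 'behavioral', microsim_response, pe_reform)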
| mit |
ivmech/iviny-scope | lib/xlsxwriter/test/comparison/test_chart_font05.py | 1 | 2453 | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013, John McNamara, [email protected]
#
import unittest
import os
from ...workbook import Workbook
from ..helperfunctions import _compare_xlsx_files
class TestCompareXLSXFiles(unittest.TestCase):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'chart_font05.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = []
self.ignore_elements = {}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
filename = self.got_filename
####################################################
workbook = Workbook(filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'bar'})
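        # Pin the chart's internal axis IDs so the generated XML is
        # byte-comparable with the reference file created by Excel.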
chart.axis_ids = [49407488, 53740288]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
worksheet.write_column('C1', data[2])
chart.add_series({'values': '=Sheet1!$A$1:$A$5'})
chart.add_series({'values': '=Sheet1!$B$1:$B$5'})
chart.add_series({'values': '=Sheet1!$C$1:$C$5'})
chart.set_title({'name': 'Title'})
chart.set_x_axis({
'name': 'XXX',
'num_font': {'name': 'Arial', 'pitch_family': 34, 'charset': 0}
})
chart.set_y_axis({
'name': 'YYY',
'num_font': {'bold': 1, 'italic': 1, 'underline': 1}
})
worksheet.insert_chart('E9', chart)
workbook.close()
####################################################
got, exp = _compare_xlsx_files(self.got_filename,
self.exp_filename,
self.ignore_files,
self.ignore_elements)
self.assertEqual(got, exp)
def tearDown(self):
# Cleanup.
if os.path.exists(self.got_filename):
os.remove(self.got_filename)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
alephobjects/Cura | Cura/util/pluginInfo.py | 1 | 5074 | """
The plugin module contains information about the plugins found for Cura.
It keeps track of a list of installed plugins and the information contained within.
"""
__copyright__ = "Copyright (C) 2013 David Braam - Released under terms of the AGPLv3 License"
import os
import sys
import traceback
import platform
import re
import tempfile
import cPickle as pickle
from Cura.util import profile
from Cura.util import resources
_pluginList = None
class pluginInfo(object):
"""
Plugin information object. Used to keep track of information about the available plugins in this installation of Cura.
	Each plugin has meta-data associated with it, which can be retrieved from this class.
"""
def __init__(self, dirname, filename):
self._dirname = dirname
self._filename = filename
self._name = os.path.splitext(os.path.basename(filename))[0]
self._type = 'unknown'
self._info = ''
self._params = []
with open(os.path.join(dirname, filename), "r") as f:
for line in f:
line = line.strip()
if not line.startswith('#'):
break
line = line[1:].split(':', 1)
if len(line) != 2:
continue
if line[0].upper() == 'NAME':
self._name = line[1].strip()
elif line[0].upper() == 'INFO':
self._info = line[1].strip()
elif line[0].upper() == 'TYPE':
self._type = line[1].strip()
elif line[0].upper() == 'DEPEND':
pass
elif line[0].upper() == 'PARAM':
m = re.match('([a-zA-Z][a-zA-Z0-9_]*)\(([a-zA-Z_]*)(?::([^\)]*))?\) +(.*)', line[1].strip())
if m is not None:
self._params.append({'name': m.group(1), 'type': m.group(2), 'default': m.group(3), 'description': m.group(4)})
# else:
# print "Unknown item in plugin meta data: %s %s" % (line[0], line[1])
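	# Illustrative plugin header (hypothetical, but matching the parser above):
	#
	#   #Name: Example plugin
	#   #Info: Short description shown in the plugin list
	#   #Type: postprocess
	#   #Param: targetHeight(float:5.0) Height at which to act (mm)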
def getFilename(self):
return self._filename
def getFullFilename(self):
return os.path.join(self._dirname, self._filename)
def getType(self):
return self._type
def getName(self):
return self._name
def getInfo(self):
return self._info
def getParams(self):
return self._params
def getPostProcessPluginConfig():
try:
return pickle.loads(str(profile.getProfileSetting('plugin_config')))
except:
return []
def setPostProcessPluginConfig(config):
profile.putProfileSetting('plugin_config', pickle.dumps(config))
def overridePostProcessPluginConfig(config):
profile.setTempOverride('plugin_config', pickle.dumps(config))
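# Illustrative shape of the (un)pickled plugin configuration handled above,
# derived from how runPostProcessingPlugins() consumes it below; values made up:
#
#   [{'filename': 'example.py', 'params': {'targetHeight': '5.0'}}]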
def getPluginBasePaths():
ret = []
if platform.system() != "Windows":
ret.append(os.path.expanduser('~/.cura/plugins/'))
if platform.system() == "Darwin" and hasattr(sys, 'frozen'):
ret.append(os.path.normpath(os.path.join(resources.resourceBasePath, "plugins")))
else:
ret.append(os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..', 'plugins')))
return ret
def getPluginList(pluginType):
global _pluginList
if _pluginList is None:
_pluginList = []
for basePath in getPluginBasePaths():
if os.path.isdir(basePath):
for filename in os.listdir(basePath):
if filename.startswith('.'):
continue
if filename.startswith('_'):
continue
if os.path.isdir(os.path.join(basePath, filename)):
if os.path.exists(os.path.join(basePath, filename, 'script.py')):
_pluginList.append(pluginInfo(basePath, os.path.join(filename, 'script.py')))
elif filename.endswith('.py'):
_pluginList.append(pluginInfo(basePath, filename))
ret = []
for plugin in _pluginList:
if plugin.getType() == pluginType:
ret.append(plugin)
return ret
def runPostProcessingPlugins(engineResult, pluginConfigList):
pluginList = getPluginList('postprocess')
tempfilename = None
for pluginConfig in pluginConfigList:
plugin = None
for pluginTest in pluginList:
if pluginTest.getFilename() == pluginConfig['filename']:
plugin = pluginTest
if plugin is None:
continue
pythonFile = plugin.getFullFilename()
if tempfilename is None:
f = tempfile.NamedTemporaryFile(prefix='CuraPluginTemp', delete=False)
tempfilename = f.name
gcode = engineResult.getGCode()
while True:
data = gcode.read(16 * 1024)
if len(data) == 0:
break
f.write(data)
f.close()
del gcode
locals = {'filename': tempfilename}
for param in plugin.getParams():
value = param['default']
if param['name'] in pluginConfig['params']:
value = pluginConfig['params'][param['name']]
if param['type'] == 'float':
try:
value = float(value)
except:
value = float(param['default'])
locals[param['name']] = value
try:
execfile(pythonFile, locals)
except:
locationInfo = traceback.extract_tb(sys.exc_info()[2])[-1]
return "%s: '%s' @ %s:%s:%d" % (str(sys.exc_info()[0].__name__), str(sys.exc_info()[1]), os.path.basename(locationInfo[0]), locationInfo[2], locationInfo[1])
if tempfilename is not None:
f = open(tempfilename, "r")
engineResult.setGCode("")
import gc
gc.collect()
data = f.read(4096)
while len(data) > 0:
engineResult._gcodeData.write(data)
data = f.read(4096)
f.close()
os.unlink(tempfilename)
return None
| agpl-3.0 |
tpo/ansible | test/support/integration/plugins/modules/ufw.py | 28 | 22566 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2014, Ahti Kitsik <[email protected]>
# Copyright: (c) 2014, Jarno Keskikangas <[email protected]>
# Copyright: (c) 2013, Aleksey Ovcharenko <[email protected]>
# Copyright: (c) 2013, James Martin <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: ufw
short_description: Manage firewall with UFW
description:
- Manage firewall with UFW.
version_added: 1.6
author:
- Aleksey Ovcharenko (@ovcharenko)
- Jarno Keskikangas (@pyykkis)
- Ahti Kitsik (@ahtik)
notes:
- See C(man ufw) for more examples.
requirements:
- C(ufw) package
options:
state:
description:
- C(enabled) reloads firewall and enables firewall on boot.
- C(disabled) unloads firewall and disables firewall on boot.
- C(reloaded) reloads firewall.
- C(reset) disables and resets firewall to installation defaults.
type: str
choices: [ disabled, enabled, reloaded, reset ]
default:
description:
- Change the default policy for incoming or outgoing traffic.
type: str
choices: [ allow, deny, reject ]
aliases: [ policy ]
direction:
description:
- Select direction for a rule or default policy command. Mutually
exclusive with I(interface_in) and I(interface_out).
type: str
choices: [ in, incoming, out, outgoing, routed ]
logging:
description:
- Toggles logging. Logged packets use the LOG_KERN syslog facility.
type: str
choices: [ 'on', 'off', low, medium, high, full ]
insert:
description:
- Insert the corresponding rule as rule number NUM.
- Note that ufw numbers rules starting with 1.
type: int
insert_relative_to:
description:
- Allows to interpret the index in I(insert) relative to a position.
- C(zero) interprets the rule number as an absolute index (i.e. 1 is
the first rule).
- C(first-ipv4) interprets the rule number relative to the index of the
first IPv4 rule, or relative to the position where the first IPv4 rule
would be if there is currently none.
- C(last-ipv4) interprets the rule number relative to the index of the
last IPv4 rule, or relative to the position where the last IPv4 rule
would be if there is currently none.
- C(first-ipv6) interprets the rule number relative to the index of the
first IPv6 rule, or relative to the position where the first IPv6 rule
would be if there is currently none.
- C(last-ipv6) interprets the rule number relative to the index of the
last IPv6 rule, or relative to the position where the last IPv6 rule
would be if there is currently none.
type: str
choices: [ first-ipv4, first-ipv6, last-ipv4, last-ipv6, zero ]
default: zero
version_added: "2.8"
rule:
description:
- Add firewall rule
type: str
choices: [ allow, deny, limit, reject ]
log:
description:
- Log new connections matched to this rule
type: bool
from_ip:
description:
- Source IP address.
type: str
default: any
aliases: [ from, src ]
from_port:
description:
- Source port.
type: str
to_ip:
description:
- Destination IP address.
type: str
default: any
aliases: [ dest, to]
to_port:
description:
- Destination port.
type: str
aliases: [ port ]
proto:
description:
- TCP/IP protocol.
type: str
choices: [ any, tcp, udp, ipv6, esp, ah, gre, igmp ]
aliases: [ protocol ]
name:
description:
- Use profile located in C(/etc/ufw/applications.d).
type: str
aliases: [ app ]
delete:
description:
- Delete rule.
type: bool
interface:
description:
- Specify interface for the rule. The direction (in or out) used
for the interface depends on the value of I(direction). See
I(interface_in) and I(interface_out) for routed rules that needs
to supply both an input and output interface. Mutually
exclusive with I(interface_in) and I(interface_out).
type: str
aliases: [ if ]
interface_in:
description:
- Specify input interface for the rule. This is mutually
exclusive with I(direction) and I(interface). However, it is
compatible with I(interface_out) for routed rules.
type: str
aliases: [ if_in ]
version_added: "2.10"
interface_out:
description:
- Specify output interface for the rule. This is mutually
exclusive with I(direction) and I(interface). However, it is
compatible with I(interface_in) for routed rules.
type: str
aliases: [ if_out ]
version_added: "2.10"
route:
description:
- Apply the rule to routed/forwarded packets.
type: bool
comment:
description:
- Add a comment to the rule. Requires UFW version >=0.35.
type: str
version_added: "2.4"
'''
EXAMPLES = r'''
- name: Allow everything and enable UFW
ufw:
state: enabled
policy: allow
- name: Set logging
ufw:
logging: 'on'
# Sometimes it is desirable to let the sender know when traffic is
# being denied, rather than simply ignoring it. In these cases, use
# reject instead of deny. In addition, log rejected connections:
- ufw:
rule: reject
port: auth
log: yes
# ufw supports connection rate limiting, which is useful for protecting
# against brute-force login attacks. ufw will deny connections if an IP
# address has attempted to initiate 6 or more connections in the last
# 30 seconds. See http://www.debian-administration.org/articles/187
# for details. Typical usage is:
- ufw:
rule: limit
port: ssh
proto: tcp
# Allow OpenSSH. (Note that as ufw manages its own state, simply removing
# a rule=allow task can leave those ports exposed. Either use delete=yes
# or a separate state=reset task)
- ufw:
rule: allow
name: OpenSSH
- name: Delete OpenSSH rule
ufw:
rule: allow
name: OpenSSH
delete: yes
- name: Deny all access to port 53
ufw:
rule: deny
port: '53'
- name: Allow port range 60000-61000
ufw:
rule: allow
port: 60000:61000
proto: tcp
- name: Allow all access to tcp port 80
ufw:
rule: allow
port: '80'
proto: tcp
- name: Allow all access from RFC1918 networks to this host
ufw:
rule: allow
src: '{{ item }}'
loop:
- 10.0.0.0/8
- 172.16.0.0/12
- 192.168.0.0/16
- name: Deny access to udp port 514 from host 1.2.3.4 and include a comment
ufw:
rule: deny
proto: udp
src: 1.2.3.4
port: '514'
comment: Block syslog
- name: Allow incoming access to eth0 from 1.2.3.5 port 5469 to 1.2.3.4 port 5469
ufw:
rule: allow
interface: eth0
direction: in
proto: udp
src: 1.2.3.5
from_port: '5469'
dest: 1.2.3.4
to_port: '5469'
# Note that IPv6 must be enabled in /etc/default/ufw for IPv6 firewalling to work.
- name: Deny all traffic from the IPv6 2001:db8::/32 to tcp port 25 on this host
ufw:
rule: deny
proto: tcp
src: 2001:db8::/32
port: '25'
- name: Deny all IPv6 traffic to tcp port 20 on this host
# this should be the first IPv6 rule
ufw:
rule: deny
proto: tcp
port: '20'
to_ip: "::"
insert: 0
insert_relative_to: first-ipv6
- name: Deny all IPv4 traffic to tcp port 20 on this host
# This should be the third to last IPv4 rule
# (insert: -1 addresses the second to last IPv4 rule;
# so the new rule will be inserted before the second
# to last IPv4 rule, and will be come the third to last
# IPv4 rule.)
ufw:
rule: deny
proto: tcp
port: '20'
to_ip: "::"
insert: -1
insert_relative_to: last-ipv4
# Can be used to further restrict a global FORWARD policy set to allow
- name: Deny forwarded/routed traffic from subnet 1.2.3.0/24 to subnet 4.5.6.0/24
ufw:
rule: deny
route: yes
src: 1.2.3.0/24
dest: 4.5.6.0/24
'''
import re
from operator import itemgetter
from ansible.module_utils.basic import AnsibleModule
def compile_ipv4_regexp():
r = r"((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}"
r += r"(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])"
return re.compile(r)
def compile_ipv6_regexp():
"""
validation pattern provided by :
https://stackoverflow.com/questions/53497/regular-expression-that-matches-
valid-ipv6-addresses#answer-17871737
"""
r = r"(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:"
r += r"|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}"
r += r"(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4})"
r += r"{1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]"
r += r"{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]"
r += r"{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4})"
r += r"{0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]"
r += r"|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}"
r += r"[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}"
r += r"[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))"
return re.compile(r)
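# Illustrative behaviour: compile_ipv4_regexp().match("10.0.0.1") and
# compile_ipv6_regexp().match("2001:db8::1") both return match objects; this
# is how is_starting_by_ipv4/is_starting_by_ipv6 in main() classify addresses.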
def main():
command_keys = ['state', 'default', 'rule', 'logging']
module = AnsibleModule(
argument_spec=dict(
state=dict(type='str', choices=['enabled', 'disabled', 'reloaded', 'reset']),
default=dict(type='str', aliases=['policy'], choices=['allow', 'deny', 'reject']),
logging=dict(type='str', choices=['full', 'high', 'low', 'medium', 'off', 'on']),
direction=dict(type='str', choices=['in', 'incoming', 'out', 'outgoing', 'routed']),
delete=dict(type='bool', default=False),
route=dict(type='bool', default=False),
insert=dict(type='int'),
insert_relative_to=dict(choices=['zero', 'first-ipv4', 'last-ipv4', 'first-ipv6', 'last-ipv6'], default='zero'),
rule=dict(type='str', choices=['allow', 'deny', 'limit', 'reject']),
interface=dict(type='str', aliases=['if']),
interface_in=dict(type='str', aliases=['if_in']),
interface_out=dict(type='str', aliases=['if_out']),
log=dict(type='bool', default=False),
from_ip=dict(type='str', default='any', aliases=['from', 'src']),
from_port=dict(type='str'),
to_ip=dict(type='str', default='any', aliases=['dest', 'to']),
to_port=dict(type='str', aliases=['port']),
proto=dict(type='str', aliases=['protocol'], choices=['ah', 'any', 'esp', 'ipv6', 'tcp', 'udp', 'gre', 'igmp']),
name=dict(type='str', aliases=['app']),
comment=dict(type='str'),
),
supports_check_mode=True,
mutually_exclusive=[
['name', 'proto', 'logging'],
# Mutual exclusivity with `interface` implied by `required_by`.
['direction', 'interface_in'],
['direction', 'interface_out'],
],
required_one_of=([command_keys]),
required_by=dict(
interface=('direction', ),
),
)
cmds = []
ipv4_regexp = compile_ipv4_regexp()
ipv6_regexp = compile_ipv6_regexp()
def filter_line_that_not_start_with(pattern, content):
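        # Despite the name, this keeps only the lines that DO start with
        # `pattern`, i.e. it filters out the lines that do not start with it.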
return ''.join([line for line in content.splitlines(True) if line.startswith(pattern)])
def filter_line_that_contains(pattern, content):
return [line for line in content.splitlines(True) if pattern in line]
def filter_line_that_not_contains(pattern, content):
        return ''.join([line for line in content.splitlines(True) if pattern not in line])
def filter_line_that_match_func(match_func, content):
return ''.join([line for line in content.splitlines(True) if match_func(line) is not None])
def filter_line_that_contains_ipv4(content):
return filter_line_that_match_func(ipv4_regexp.search, content)
def filter_line_that_contains_ipv6(content):
return filter_line_that_match_func(ipv6_regexp.search, content)
def is_starting_by_ipv4(ip):
return ipv4_regexp.match(ip) is not None
def is_starting_by_ipv6(ip):
return ipv6_regexp.match(ip) is not None
def execute(cmd, ignore_error=False):
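        # Each entry of `cmd` is a list whose last element is a command
        # fragment; when an entry has two elements, the first is a truthy
        # include flag (e.g. [module.boolean(params['route']), 'route']).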
cmd = ' '.join(map(itemgetter(-1), filter(itemgetter(0), cmd)))
cmds.append(cmd)
(rc, out, err) = module.run_command(cmd, environ_update={"LANG": "C"})
if rc != 0 and not ignore_error:
module.fail_json(msg=err or out, commands=cmds)
return out
def get_current_rules():
user_rules_files = ["/lib/ufw/user.rules",
"/lib/ufw/user6.rules",
"/etc/ufw/user.rules",
"/etc/ufw/user6.rules",
"/var/lib/ufw/user.rules",
"/var/lib/ufw/user6.rules"]
cmd = [[grep_bin], ["-h"], ["'^### tuple'"]]
cmd.extend([[f] for f in user_rules_files])
return execute(cmd, ignore_error=True)
def ufw_version():
"""
        Returns the major, minor and revision version of ufw installed on the system.
"""
out = execute([[ufw_bin], ["--version"]])
lines = [x for x in out.split('\n') if x.strip() != '']
if len(lines) == 0:
module.fail_json(msg="Failed to get ufw version.", rc=0, out=out)
matches = re.search(r'^ufw.+(\d+)\.(\d+)(?:\.(\d+))?.*$', lines[0])
if matches is None:
module.fail_json(msg="Failed to get ufw version.", rc=0, out=out)
# Convert version to numbers
major = int(matches.group(1))
minor = int(matches.group(2))
rev = 0
if matches.group(3) is not None:
rev = int(matches.group(3))
return major, minor, rev
params = module.params
commands = dict((key, params[key]) for key in command_keys if params[key])
# Ensure ufw is available
ufw_bin = module.get_bin_path('ufw', True)
grep_bin = module.get_bin_path('grep', True)
# Save the pre state and rules in order to recognize changes
pre_state = execute([[ufw_bin], ['status verbose']])
pre_rules = get_current_rules()
changed = False
# Execute filter
for (command, value) in commands.items():
cmd = [[ufw_bin], [module.check_mode, '--dry-run']]
if command == 'state':
states = {'enabled': 'enable', 'disabled': 'disable',
'reloaded': 'reload', 'reset': 'reset'}
if value in ['reloaded', 'reset']:
changed = True
if module.check_mode:
# "active" would also match "inactive", hence the space
ufw_enabled = pre_state.find(" active") != -1
if (value == 'disabled' and ufw_enabled) or (value == 'enabled' and not ufw_enabled):
changed = True
else:
execute(cmd + [['-f'], [states[value]]])
elif command == 'logging':
extract = re.search(r'Logging: (on|off)(?: \(([a-z]+)\))?', pre_state)
if extract:
current_level = extract.group(2)
current_on_off_value = extract.group(1)
if value != "off":
if current_on_off_value == "off":
changed = True
elif value != "on" and value != current_level:
changed = True
elif current_on_off_value != "off":
changed = True
else:
changed = True
if not module.check_mode:
execute(cmd + [[command], [value]])
elif command == 'default':
if params['direction'] not in ['outgoing', 'incoming', 'routed', None]:
module.fail_json(msg='For default, direction must be one of "outgoing", "incoming" and "routed", or direction must not be specified.')
if module.check_mode:
regexp = r'Default: (deny|allow|reject) \(incoming\), (deny|allow|reject) \(outgoing\), (deny|allow|reject|disabled) \(routed\)'
extract = re.search(regexp, pre_state)
if extract is not None:
current_default_values = {}
current_default_values["incoming"] = extract.group(1)
current_default_values["outgoing"] = extract.group(2)
current_default_values["routed"] = extract.group(3)
v = current_default_values[params['direction'] or 'incoming']
if v not in (value, 'disabled'):
changed = True
else:
changed = True
else:
execute(cmd + [[command], [value], [params['direction']]])
elif command == 'rule':
if params['direction'] not in ['in', 'out', None]:
module.fail_json(msg='For rules, direction must be one of "in" and "out", or direction must not be specified.')
if not params['route'] and params['interface_in'] and params['interface_out']:
module.fail_json(msg='Only route rules can combine '
'interface_in and interface_out')
# Rules are constructed according to the long format
#
# ufw [--dry-run] [route] [delete] [insert NUM] allow|deny|reject|limit [in|out on INTERFACE] [log|log-all] \
# [from ADDRESS [port PORT]] [to ADDRESS [port PORT]] \
# [proto protocol] [app application] [comment COMMENT]
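            # For instance, the EXAMPLES task with rule=allow, direction=in,
            # interface=eth0, proto=udp, src=1.2.3.5, from_port=5469,
            # dest=1.2.3.4 and to_port=5469 assembles into:
            #   ufw [--dry-run] allow in on eth0 from 1.2.3.5 port 5469 to 1.2.3.4 port 5469 proto udp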
cmd.append([module.boolean(params['route']), 'route'])
cmd.append([module.boolean(params['delete']), 'delete'])
if params['insert'] is not None:
relative_to_cmd = params['insert_relative_to']
if relative_to_cmd == 'zero':
insert_to = params['insert']
else:
(dummy, numbered_state, dummy) = module.run_command([ufw_bin, 'status', 'numbered'])
                    numbered_line_re = re.compile(r'^\[ *([0-9]+)\] ')
lines = [(numbered_line_re.match(line), '(v6)' in line) for line in numbered_state.splitlines()]
lines = [(int(matcher.group(1)), ipv6) for (matcher, ipv6) in lines if matcher]
last_number = max([no for (no, ipv6) in lines]) if lines else 0
has_ipv4 = any([not ipv6 for (no, ipv6) in lines])
has_ipv6 = any([ipv6 for (no, ipv6) in lines])
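                    # `lines` now holds (rule_number, is_ipv6) pairs; ufw marks
                    # IPv6 rules with '(v6)' in `ufw status numbered` output.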
if relative_to_cmd == 'first-ipv4':
relative_to = 1
elif relative_to_cmd == 'last-ipv4':
relative_to = max([no for (no, ipv6) in lines if not ipv6]) if has_ipv4 else 1
elif relative_to_cmd == 'first-ipv6':
relative_to = max([no for (no, ipv6) in lines if not ipv6]) + 1 if has_ipv4 else 1
elif relative_to_cmd == 'last-ipv6':
relative_to = last_number if has_ipv6 else last_number + 1
insert_to = params['insert'] + relative_to
if insert_to > last_number:
# ufw does not like it when the insert number is larger than the
# maximal rule number for IPv4/IPv6.
insert_to = None
cmd.append([insert_to is not None, "insert %s" % insert_to])
cmd.append([value])
cmd.append([params['direction'], "%s" % params['direction']])
cmd.append([params['interface'], "on %s" % params['interface']])
cmd.append([params['interface_in'], "in on %s" % params['interface_in']])
cmd.append([params['interface_out'], "out on %s" % params['interface_out']])
cmd.append([module.boolean(params['log']), 'log'])
for (key, template) in [('from_ip', "from %s"), ('from_port', "port %s"),
('to_ip', "to %s"), ('to_port', "port %s"),
('proto', "proto %s"), ('name', "app '%s'")]:
value = params[key]
cmd.append([value, template % (value)])
ufw_major, ufw_minor, dummy = ufw_version()
            # the comment option is only supported in ufw 0.35 and later
if (ufw_major == 0 and ufw_minor >= 35) or ufw_major > 0:
cmd.append([params['comment'], "comment '%s'" % params['comment']])
rules_dry = execute(cmd)
if module.check_mode:
nb_skipping_line = len(filter_line_that_contains("Skipping", rules_dry))
if not (nb_skipping_line > 0 and nb_skipping_line == len(rules_dry.splitlines(True))):
rules_dry = filter_line_that_not_start_with("### tuple", rules_dry)
                    # The ufw dry-run output doesn't include every rule, so
                    # compare only the IPv4 or IPv6 subset as appropriate.
if is_starting_by_ipv4(params['from_ip']) or is_starting_by_ipv4(params['to_ip']):
if filter_line_that_contains_ipv4(pre_rules) != filter_line_that_contains_ipv4(rules_dry):
changed = True
elif is_starting_by_ipv6(params['from_ip']) or is_starting_by_ipv6(params['to_ip']):
if filter_line_that_contains_ipv6(pre_rules) != filter_line_that_contains_ipv6(rules_dry):
changed = True
elif pre_rules != rules_dry:
changed = True
# Get the new state
if module.check_mode:
return module.exit_json(changed=changed, commands=cmds)
else:
post_state = execute([[ufw_bin], ['status'], ['verbose']])
if not changed:
post_rules = get_current_rules()
changed = (pre_state != post_state) or (pre_rules != post_rules)
return module.exit_json(changed=changed, commands=cmds, msg=post_state.rstrip())
if __name__ == '__main__':
main()
| gpl-3.0 |
sekikn/ambari | ambari-server/src/main/resources/stack-hooks/before-ANY/scripts/hook.py | 2 | 1181 | """
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from shared_initialization import setup_users, setup_hadoop_env, setup_java, setup_env
from resource_management import Hook
class BeforeAnyHook(Hook):
def hook(self, env):
import params
env.set_params(params)
setup_users()
if params.has_namenode or params.dfs_type == 'HCFS':
setup_hadoop_env()
setup_env()
setup_java()
if __name__ == "__main__":
BeforeAnyHook().execute()
| apache-2.0 |
pshen/ansible | lib/ansible/module_utils/gce.py | 187 | 2535 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Franck Cuny <[email protected]>, 2014
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
try:
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
HAS_LIBCLOUD_BASE = True
except ImportError:
HAS_LIBCLOUD_BASE = False
from ansible.module_utils.gcp import gcp_connect
from ansible.module_utils.gcp import unexpected_error_msg as gcp_error
USER_AGENT_PRODUCT = "Ansible-gce"
USER_AGENT_VERSION = "v1"
def gce_connect(module, provider=None):
"""Return a GCP connection for Google Compute Engine."""
if not HAS_LIBCLOUD_BASE:
module.fail_json(msg='libcloud must be installed to use this module')
provider = provider or Provider.GCE
return gcp_connect(module, provider, get_driver, USER_AGENT_PRODUCT, USER_AGENT_VERSION)
def unexpected_error_msg(error):
"""Create an error string based on passed in error."""
return gcp_error(error)
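# Illustrative usage from a cloud module (argument handling omitted; `module`
# is a hypothetical AnsibleModule instance):
#
#   gce = gce_connect(module)   # authenticated libcloud GCE driver
#   nodes = gce.list_nodes()    # standard libcloud compute driver API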
| gpl-3.0 |
bqbn/addons-server | src/olympia/translations/hold.py | 3 | 1610 | from threading import local
import django.dispatch
from django.core.signals import request_finished
_to_save = local()
def add_translation(*, instance, translation, field):
"""
Queue a `translation` that needs to be saved for a particular `field` on
`instance`.
"""
if not hasattr(_to_save, 'translations'):
_to_save.translations = {}
key = make_key(instance)
_to_save.translations.setdefault(key, [])
_to_save.translations[key].append((field.name, translation))
def clean_translations(sender, **kwargs):
"""
Removes all translations in the queue.
"""
if hasattr(_to_save, 'translations'):
_to_save.translations = {}
def make_key(obj):
"""Returns a key for this object."""
return id(obj)
def save_translations(instance):
"""
For a given instance, save all the translations in the queue and then
clear them from the queue.
"""
if not hasattr(_to_save, 'translations'):
return
key = make_key(instance)
for field_name, translation in _to_save.translations.get(key, []):
is_new = translation.autoid is None
translation.save(force_insert=is_new, force_update=not is_new)
translation_saved.send(
sender=instance.__class__, instance=instance,
field_name=field_name)
if key in _to_save.translations:
del _to_save.translations[key]
# Ensure that on request completion, we flush out any unsaved translations.
request_finished.connect(clean_translations, dispatch_uid='clean_translations')
translation_saved = django.dispatch.Signal()
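# Illustrative flow (the model integration is assumed, not shown here):
#
#   add_translation(instance=addon, translation=trans, field=name_field)
#   ...
#   save_translations(addon)   # persists the queue, fires translation_saved
#
# Anything still queued at the end of the request is dropped by
# clean_translations via the request_finished signal.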
| bsd-3-clause |
ariebovenberg/snug | examples/github/types.py | 2 | 2883 | """datastructures and type definitions"""
import enum
import reprlib
import typing as t
from datetime import datetime
from functools import partial
from dataclasses import dataclass
_repr = reprlib.Repr()
_repr.maxstring = 45
dclass = partial(dataclass, frozen=True, repr=False)
@dclass()
class UserSummary:
login: str
id: int
avatar_url: str
gravatar_id: str
# full: 'User'
html_url: str
type: str
site_admin: bool
def __repr__(self):
return f'<User: {self.login}>'
@dclass()
class User(UserSummary):
bio: str
blog: str
company: str
created_at: datetime
email: str
id: str
location: str
login: str
name: str
repos_url: str
site_admin: str
updated_at: datetime
url: str
@dclass()
class RepoSummary:
id: int
owner: UserSummary
name: str
full_name: str
description: str
private: bool
fork: bool
url: str
html_url: str
def __repr__(self):
return f'<RepoSummary: {self.name} | {_repr.repr(self.description)}>'
@dclass()
class Repo(RepoSummary):
created_at: datetime
default_branch: str
description: str
full_name: str
homepage: str
id: int
language: str
name: str
open_issues_count: int
owner: UserSummary
private: bool
pushed_at: datetime
size: float
stargazers_count: int
updated_at: datetime
watchers_count: int
@dclass()
class OrganizationSummary:
"""basic details of a github organization"""
id: int
description: t.Optional[str]
login: str
@dclass()
class Organization(OrganizationSummary):
"""a github organization"""
blog: t.Optional[str]
created_at: t.Optional[datetime]
name: t.Optional[str]
repos_url: str
type: t.Optional[str]
def __repr__(self):
        return f'<Organization: {self.login}>'
@dclass()
class Issue:
"""a github issue or pull-request"""
class State(enum.Enum):
OPEN = 'open'
CLOSED = 'closed'
ALL = 'all'
number: str
title: str
body: str
state: State
def __repr__(self):
return f'<Issue: #{self.number} {self.title}>'
class Sort(enum.Enum):
CREATED = 'created'
UPDATED = 'updated'
COMMENTS = 'comments'
class Filter(enum.Enum):
ASSIGNED = 'assigned'
CREATED = 'created'
MENTIONED = 'mentioned'
SUBSCRIBED = 'subscribed'
ALL = 'all'
@dclass()
class Comment:
"""an issue comment"""
id: int
user: UserSummary
body: str
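# Illustrative construction (field values are made up):
#
#   issue = Issue(number='42', title='demo', body='', state=Issue.State.OPEN)
#   repr(issue)  # -> '<Issue: #42 demo>'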
| mit |
rafaellc28/Latex2MiniZinc | latex2minizinc/Expression.py | 1 | 1933 | class Expression:
"""
    Class representing an expression node in the AST of an MLP
"""
def __init__(self):
self.indice = -1
self.identifier = None
self.identifierName = None
self.identifierList = None
self.isInSet = False
self.isSet = None
self.isVar = None
self.isParam = None
self.isReal = False
self.isSymbolic = False
self.isLogical = False
self.isBinary = False
self.isInteger = False
self.isNatural = False
self.isEnum = False
self.isArray = False
self.isSubIndice = False
self.isInt = False
self.isSetOfInt = False
self.isDeclaredAsParam = None
self.isDeclaredAsSet = None
self.isDeclaredAsVar = None
self.checkIfIsDummyIndex = False
self.symbolTable = None
def getSymbolTable(self):
return self.symbolTable
def setSymbolTable(self, symbolTable):
self.symbolTable = symbolTable
def getSymbol(self):
return self
def getSymbolName(self, codeGenerator):
return self.generateCode(codeGenerator)
def getDimension(self):
return 1
def getIndice(self):
return self.indice
def setIndice(self, indice):
self.indice = indice
def getIdentifier(self):
return self.identifier
def setIdentifier(self, identifier):
self.identifier = identifier
def getIdentifierName(self):
return self.identifierName
def setIdentifierName(self, identifierName):
self.identifierName = identifierName
def getIdentifierList(self):
return self.identifierList
def setIdentifierList(self, identifierList):
self.identifierList = identifierList
def enableCheckDummyIndices(self):
pass
def disableCheckDummyIndices(self):
pass
| mit |
cstavr/synnefo | snf-cyclades-app/synnefo/api/compute_urls.py | 9 | 1401 | # Copyright (C) 2010-2014 GRNET S.A.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.conf.urls import include, patterns
from snf_django.lib.api import api_endpoint_not_found
from synnefo.api import (servers, flavors, images, extensions)
from synnefo.api.versions import versions_list, version_details
#
# The OpenStack Compute API v2.0
#
compute_api20_patterns = patterns(
'',
(r'^servers', include(servers)),
(r'^flavors', include(flavors)),
(r'^images', include(images)),
(r'^extensions', include(extensions)),
)
urlpatterns = patterns(
'',
(r'^(?:.json|.xml|.atom)?$', versions_list),
(r'^v2.0/(?:.json|.xml|.atom)?$', version_details,
{'api_version': 'v1.1'}),
(r'^v2.0/', include(compute_api20_patterns)),
(r'^.*', api_endpoint_not_found),
)
| gpl-3.0 |
Kotaimen/awscfncli | awscfncli2/cli/commands/validate/validate.py | 1 | 1394 | import click
from awscfncli2.cli.context import Context
from awscfncli2.cli.utils.deco import command_exception_handler
from awscfncli2.cli.utils.pprint import echo_pair_if_exists
@click.command('validate')
@click.pass_context
@command_exception_handler
def cli(ctx):
"""Validate template file."""
assert isinstance(ctx.obj, Context)
for stack_context in ctx.obj.runner.contexts:
stack_context.make_boto3_parameters()
ctx.obj.ppt.pprint_stack_name(
stack_context.stack_key,
stack_context.parameters['StackName'],
'Validating '
)
session = stack_context.session
client = session.client('cloudformation')
stack_context.run_packaging()
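        # Packaging leaves either an inline TemplateBody or an uploaded
        # TemplateURL in the stack parameters; try the body first and fall
        # back to the URL when that key is absent.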
try:
template_body = stack_context.parameters['TemplateBody']
result = client.validate_template(
TemplateBody=template_body,
)
except KeyError:
template_url = stack_context.parameters['TemplateURL']
result = client.validate_template(
TemplateURL=template_url,
)
click.secho('Validation complete.')
echo_pair_if_exists(result, 'Capabilities', 'Capabilities')
echo_pair_if_exists(result, 'Capabilities Reason', 'CapabilitiesReason')
echo_pair_if_exists(result, 'Declared Transforms', 'DeclaredTransforms')
| mit |
nnethercote/servo | tests/wpt/web-platform-tests/tools/wptrunner/wptrunner/tests/test_update.py | 2 | 58773 | import json
import mock
import os
import pytest
import sys
from io import BytesIO
from .. import metadata, manifestupdate
from ..update.update import WPTUpdate
from ..update.base import StepRunner, Step
from mozlog import structuredlog, handlers, formatters
here = os.path.dirname(__file__)
sys.path.insert(0, os.path.join(here, os.pardir, os.pardir, os.pardir))
from manifest import manifest, item as manifest_item
def rel_path_to_test_url(rel_path):
assert not os.path.isabs(rel_path)
return rel_path.replace(os.sep, "/")
def SourceFileWithTest(path, hash, cls, *args):
s = mock.Mock(rel_path=path, hash=hash)
test = cls("/foobar", path, "/", rel_path_to_test_url(path), *args)
s.manifest_items = mock.Mock(return_value=(cls.item_type, [test]))
return s
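# SourceFileWithTest builds a mock of just enough of a manifest SourceFile:
# a rel_path, a hash, and a manifest_items() that yields a single test item
# of the requested class.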
item_classes = {"testharness": manifest_item.TestharnessTest,
"reftest": manifest_item.RefTest,
"reftest_node": manifest_item.RefTestNode,
"manual": manifest_item.ManualTest,
"stub": manifest_item.Stub,
"wdspec": manifest_item.WebDriverSpecTest,
"conformancechecker": manifest_item.ConformanceCheckerTest,
"visual": manifest_item.VisualTest,
"support": manifest_item.SupportFile}
default_run_info = {"debug": False, "os": "linux", "version": "18.04", "processor": "x86_64", "bits": 64}
test_id = "/path/to/test.htm"
dir_id = "path/to/__dir__"
def reset_globals():
metadata.prop_intern.clear()
metadata.run_info_intern.clear()
metadata.status_intern.clear()
def get_run_info(overrides):
run_info = default_run_info.copy()
run_info.update(overrides)
return run_info
def update(tests, *logs, **kwargs):
full_update = kwargs.pop("full_update", False)
disable_intermittent = kwargs.pop("disable_intermittent", False)
update_intermittent = kwargs.pop("update_intermittent", False)
remove_intermittent = kwargs.pop("remove_intermittent", False)
assert not kwargs
id_test_map, updater = create_updater(tests)
for log in logs:
log = create_log(log)
updater.update_from_log(log)
update_properties = (["debug", "os", "version", "processor"],
{"os": ["version"], "processor": "bits"})
expected_data = {}
metadata.load_expected = lambda _, __, test_path, *args: expected_data.get(test_path)
for test_path, test_ids, test_type, manifest_str in tests:
expected_data[test_path] = manifestupdate.compile(BytesIO(manifest_str),
test_path,
"/",
update_properties)
return list(metadata.update_results(id_test_map,
update_properties,
full_update,
disable_intermittent,
update_intermittent,
remove_intermittent))
def create_updater(tests, url_base="/", **kwargs):
m = create_test_manifest(tests, url_base)
reset_globals()
id_test_map = metadata.create_test_tree(None, m)
return id_test_map, metadata.ExpectedUpdater(id_test_map, **kwargs)
def create_log(entries):
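    # `entries` is either a list of (action, kwargs) structured-log events or
    # a wptreport-style dict; either way it is serialized to an in-memory
    # JSON stream.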
data = BytesIO()
if isinstance(entries, list):
logger = structuredlog.StructuredLogger("expected_test")
handler = handlers.StreamHandler(data, formatters.JSONFormatter())
logger.add_handler(handler)
for item in entries:
action, kwargs = item
getattr(logger, action)(**kwargs)
logger.remove_handler(handler)
else:
json.dump(entries, data)
data.seek(0)
return data
def suite_log(entries, run_info=None):
_run_info = default_run_info.copy()
if run_info:
_run_info.update(run_info)
return ([("suite_start", {"tests": [], "run_info": _run_info})] +
entries +
[("suite_end", {})])
def create_test_manifest(tests, url_base="/"):
source_files = []
for i, (test, _, test_type, _) in enumerate(tests):
if test_type:
source_files.append((SourceFileWithTest(test, str(i) * 40, item_classes[test_type]), True))
m = manifest.Manifest()
m.update(source_files)
return m
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_0():
tests = [("path/to/test.htm", [test_id], "testharness",
"""[test.htm]
[test1]
expected: FAIL""")]
log = suite_log([("test_start", {"test": "/path/to/test.htm"}),
("test_status", {"test": "/path/to/test.htm",
"subtest": "test1",
"status": "PASS",
"expected": "FAIL"}),
("test_end", {"test": "/path/to/test.htm",
"status": "OK"})])
updated = update(tests, log)
assert len(updated) == 1
assert updated[0][1].is_empty
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_1():
tests = [("path/to/test.htm", [test_id], "testharness",
"""[test.htm]
[test1]
expected: ERROR""")]
log = suite_log([("test_start", {"test": test_id}),
("test_status", {"test": test_id,
"subtest": "test1",
"status": "FAIL",
"expected": "ERROR"}),
("test_end", {"test": test_id,
"status": "OK"})])
updated = update(tests, log)
new_manifest = updated[0][1]
assert not new_manifest.is_empty
assert new_manifest.get_test(test_id).children[0].get("expected", default_run_info) == "FAIL"
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_known_intermittent_1():
tests = [("path/to/test.htm", [test_id], "testharness",
"""[test.htm]
[test1]
expected: PASS""")]
log_0 = suite_log([("test_start", {"test": test_id}),
("test_status", {"test": test_id,
"subtest": "test1",
"status": "FAIL",
"expected": "PASS"}),
("test_end", {"test": test_id,
"status": "OK"})])
log_1 = suite_log([("test_start", {"test": test_id}),
("test_status", {"test": test_id,
"subtest": "test1",
"status": "PASS",
"expected": "PASS"}),
("test_end", {"test": test_id,
"status": "OK"})])
log_2 = suite_log([("test_start", {"test": test_id}),
("test_status", {"test": test_id,
"subtest": "test1",
"status": "PASS",
"expected": "PASS"}),
("test_end", {"test": test_id,
"status": "OK"})])
updated = update(tests, log_0, log_1, log_2, update_intermittent=True)
new_manifest = updated[0][1]
assert not new_manifest.is_empty
assert new_manifest.get_test(test_id).children[0].get(
"expected", default_run_info) == ["PASS", "FAIL"]
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_known_intermittent_2():
tests = [("path/to/test.htm", [test_id], "testharness",
"""[test.htm]
[test1]
expected: PASS""")]
log_0 = suite_log([("test_start", {"test": test_id}),
("test_status", {"test": test_id,
"subtest": "test1",
"status": "FAIL",
"expected": "PASS"}),
("test_end", {"test": test_id,
"status": "OK"})])
updated = update(tests, log_0, update_intermittent=True)
new_manifest = updated[0][1]
assert not new_manifest.is_empty
assert new_manifest.get_test(test_id).children[0].get(
"expected", default_run_info) == "FAIL"
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_existing_known_intermittent():
tests = [("path/to/test.htm", [test_id], "testharness",
"""[test.htm]
[test1]
expected: [PASS, FAIL]""")]
log_0 = suite_log([("test_start", {"test": test_id}),
("test_status", {"test": test_id,
"subtest": "test1",
"status": "ERROR",
"expected": "PASS",
"known_intermittent": ["FAIL"]}),
("test_end", {"test": test_id,
"status": "OK"})])
log_1 = suite_log([("test_start", {"test": test_id}),
("test_status", {"test": test_id,
"subtest": "test1",
"status": "PASS",
"expected": "PASS",
"known_intermittent": ["FAIL"]}),
("test_end", {"test": test_id,
"status": "OK"})])
log_2 = suite_log([("test_start", {"test": test_id}),
("test_status", {"test": test_id,
"subtest": "test1",
"status": "PASS",
"expected": "PASS",
"known_intermittent": ["FAIL"]}),
("test_end", {"test": test_id,
"status": "OK"})])
updated = update(tests, log_0, log_1, log_2, update_intermittent=True)
new_manifest = updated[0][1]
assert not new_manifest.is_empty
assert new_manifest.get_test(test_id).children[0].get(
"expected", default_run_info) == ["PASS", "ERROR", "FAIL"]
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_remove_previous_intermittent():
tests = [("path/to/test.htm", [test_id], "testharness",
"""[test.htm]
[test1]
expected: [PASS, FAIL]""")]
log_0 = suite_log([("test_start", {"test": test_id}),
("test_status", {"test": test_id,
"subtest": "test1",
"status": "ERROR",
"expected": "PASS",
"known_intermittent": ["FAIL"]}),
("test_end", {"test": test_id,
"status": "OK"})])
log_1 = suite_log([("test_start", {"test": test_id}),
("test_status", {"test": test_id,
"subtest": "test1",
"status": "PASS",
"expected": "PASS",
"known_intermittent": ["FAIL"]}),
("test_end", {"test": test_id,
"status": "OK"})])
log_2 = suite_log([("test_start", {"test": test_id}),
("test_status", {"test": test_id,
"subtest": "test1",
"status": "PASS",
"expected": "PASS",
"known_intermittent": ["FAIL"]}),
("test_end", {"test": test_id,
"status": "OK"})])
updated = update(tests,
log_0,
log_1,
log_2,
update_intermittent=True,
remove_intermittent=True)
new_manifest = updated[0][1]
assert not new_manifest.is_empty
assert new_manifest.get_test(test_id).children[0].get(
"expected", default_run_info) == ["PASS", "ERROR"]
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_new_test_with_intermittent():
tests = [("path/to/test.htm", [test_id], "testharness", None)]
log_0 = suite_log([("test_start", {"test": test_id}),
("test_status", {"test": test_id,
"subtest": "test1",
"status": "PASS",
"expected": "PASS"}),
("test_end", {"test": test_id,
"status": "OK"})])
log_1 = suite_log([("test_start", {"test": test_id}),
("test_status", {"test": test_id,
"subtest": "test1",
"status": "PASS",
"expected": "PASS"}),
("test_end", {"test": test_id,
"status": "OK"})])
log_2 = suite_log([("test_start", {"test": test_id}),
("test_status", {"test": test_id,
"subtest": "test1",
"status": "FAIL",
"expected": "PASS"}),
("test_end", {"test": test_id,
"status": "OK"})])
updated = update(tests, log_0, log_1, log_2, update_intermittent=True)
new_manifest = updated[0][1]
assert not new_manifest.is_empty
assert new_manifest.get_test("test.htm") is None
assert len(new_manifest.get_test(test_id).children) == 1
assert new_manifest.get_test(test_id).children[0].get(
"expected", default_run_info) == ["PASS", "FAIL"]
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_expected_tie_resolution():
tests = [("path/to/test.htm", [test_id], "testharness", None)]
log_0 = suite_log([("test_start", {"test": test_id}),
("test_status", {"test": test_id,
"subtest": "test1",
"status": "PASS",
"expected": "PASS"}),
("test_end", {"test": test_id,
"status": "OK"})])
log_1 = suite_log([("test_start", {"test": test_id}),
("test_status", {"test": test_id,
"subtest": "test1",
"status": "FAIL",
"expected": "PASS"}),
("test_end", {"test": test_id,
"status": "OK"})])
updated = update(tests, log_0, log_1, update_intermittent=True)
new_manifest = updated[0][1]
assert not new_manifest.is_empty
assert new_manifest.get_test(test_id).children[0].get(
"expected", default_run_info) == ["PASS", "FAIL"]
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_reorder_expected():
tests = [("path/to/test.htm", [test_id], "testharness",
"""[test.htm]
[test1]
expected: [PASS, FAIL]""")]
log_0 = suite_log([("test_start", {"test": test_id}),
("test_status", {"test": test_id,
"subtest": "test1",
"status": "FAIL",
"expected": "PASS",
"known_intermittent": ["FAIL"]}),
("test_end", {"test": test_id,
"status": "OK"})])
log_1 = suite_log([("test_start", {"test": test_id}),
("test_status", {"test": test_id,
"subtest": "test1",
"status": "FAIL",
"expected": "PASS",
"known_intermittent": ["FAIL"]}),
("test_end", {"test": test_id,
"status": "OK"})])
log_2 = suite_log([("test_start", {"test": test_id}),
("test_status", {"test": test_id,
"subtest": "test1",
"status": "PASS",
"expected": "PASS",
"known_intermittent": ["FAIL"]}),
("test_end", {"test": test_id,
"status": "OK"})])
updated = update(tests, log_0, log_1, log_2, update_intermittent=True)
new_manifest = updated[0][1]
assert not new_manifest.is_empty
assert new_manifest.get_test(test_id).children[0].get(
"expected", default_run_info) == ["FAIL", "PASS"]
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_and_preserve_unchanged_expected_intermittent():
tests = [("path/to/test.htm", [test_id], "testharness", """
[test.htm]
expected:
if os == "android": [PASS, FAIL]
FAIL""")]
log_0 = suite_log([("test_start", {"test": test_id}),
("test_end", {"test": test_id,
"status": "FAIL",
"expected": "PASS",
"known_intermittent": ["FAIL"]})],
run_info={"os": "android"})
log_1 = suite_log([("test_start", {"test": test_id}),
("test_end", {"test": test_id,
"status": "PASS",
"expected": "PASS",
"known_intermittent": ["FAIL"]})],
run_info={"os": "android"})
log_2 = suite_log([("test_start", {"test": test_id}),
("test_end", {"test": test_id,
"status": "PASS",
"expected": "FAIL"})])
updated = update(tests, log_0, log_1, log_2)
new_manifest = updated[0][1]
assert not new_manifest.is_empty
run_info_1 = default_run_info.copy()
run_info_1.update({"os": "android"})
assert not new_manifest.is_empty
assert new_manifest.get_test(test_id).get(
"expected", run_info_1) == ["PASS", "FAIL"]
assert new_manifest.get_test(test_id).get(
"expected", default_run_info) == "PASS"
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_test_with_intermittent_to_one_expected_status():
tests = [("path/to/test.htm", [test_id], "testharness",
"""[test.htm]
[test1]
expected: [PASS, FAIL]""")]
log_0 = suite_log([("test_start", {"test": test_id}),
("test_status", {"test": test_id,
"subtest": "test1",
"status": "ERROR",
"expected": "PASS",
"known_intermittent": ["FAIL"]}),
("test_end", {"test": test_id,
"status": "OK"})])
updated = update(tests, log_0)
new_manifest = updated[0][1]
assert not new_manifest.is_empty
assert new_manifest.get_test(test_id).children[0].get(
"expected", default_run_info) == "ERROR"
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_intermittent_with_conditions():
tests = [("path/to/test.htm", [test_id], "testharness", """
[test.htm]
expected:
if os == "android": [PASS, FAIL]""")]
log_0 = suite_log([("test_start", {"test": test_id}),
("test_end", {"test": test_id,
"status": "TIMEOUT",
"expected": "PASS",
"known_intermittent": ["FAIL"]})],
run_info={"os": "android"})
log_1 = suite_log([("test_start", {"test": test_id}),
("test_end", {"test": test_id,
"status": "PASS",
"expected": "PASS",
"known_intermittent": ["FAIL"]})],
run_info={"os": "android"})
updated = update(tests, log_0, log_1, update_intermittent=True)
new_manifest = updated[0][1]
assert not new_manifest.is_empty
run_info_1 = default_run_info.copy()
run_info_1.update({"os": "android"})
assert not new_manifest.is_empty
assert new_manifest.get_test(test_id).get(
"expected", run_info_1) == ["PASS", "TIMEOUT", "FAIL"]
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_and_remove_intermittent_with_conditions():
tests = [("path/to/test.htm", [test_id], "testharness", """
[test.htm]
expected:
if os == "android": [PASS, FAIL]""")]
log_0 = suite_log([("test_start", {"test": test_id}),
("test_end", {"test": test_id,
"status": "TIMEOUT",
"expected": "PASS",
"known_intermittent": ["FAIL"]})],
run_info={"os": "android"})
log_1 = suite_log([("test_start", {"test": test_id}),
("test_end", {"test": test_id,
"status": "PASS",
"expected": "PASS",
"known_intermittent": ["FAIL"]})],
run_info={"os": "android"})
updated = update(tests, log_0, log_1, update_intermittent=True, remove_intermittent=True)
new_manifest = updated[0][1]
assert not new_manifest.is_empty
run_info_1 = default_run_info.copy()
run_info_1.update({"os": "android"})
assert not new_manifest.is_empty
assert new_manifest.get_test(test_id).get(
"expected", run_info_1) == ["PASS", "TIMEOUT"]
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_skip_0():
tests = [("path/to/test.htm", [test_id], "testharness",
"""[test.htm]
[test1]
expected: FAIL""")]
log = suite_log([("test_start", {"test": test_id}),
("test_status", {"test": test_id,
"subtest": "test1",
"status": "FAIL",
"expected": "FAIL"}),
("test_end", {"test": test_id,
"status": "OK"})])
updated = update(tests, log)
assert not updated
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_new_subtest():
tests = [("path/to/test.htm", [test_id], "testharness", """[test.htm]
[test1]
expected: FAIL""")]
log = suite_log([("test_start", {"test": test_id}),
("test_status", {"test": test_id,
"subtest": "test1",
"status": "FAIL",
"expected": "FAIL"}),
("test_status", {"test": test_id,
"subtest": "test2",
"status": "FAIL",
"expected": "PASS"}),
("test_end", {"test": test_id,
"status": "OK"})])
updated = update(tests, log)
new_manifest = updated[0][1]
assert not new_manifest.is_empty
assert new_manifest.get_test(test_id).children[0].get("expected", default_run_info) == "FAIL"
assert new_manifest.get_test(test_id).children[1].get("expected", default_run_info) == "FAIL"
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_multiple_0():
tests = [("path/to/test.htm", [test_id], "testharness", """[test.htm]
[test1]
expected: FAIL""")]
log_0 = suite_log([("test_start", {"test": test_id}),
("test_status", {"test": test_id,
"subtest": "test1",
"status": "FAIL",
"expected": "FAIL"}),
("test_end", {"test": test_id,
"status": "OK"})],
run_info={"debug": False, "os": "osx"})
log_1 = suite_log([("test_start", {"test": test_id}),
("test_status", {"test": test_id,
"subtest": "test1",
"status": "TIMEOUT",
"expected": "FAIL"}),
("test_end", {"test": test_id,
"status": "OK"})],
run_info={"debug": False, "os": "linux"})
updated = update(tests, log_0, log_1)
new_manifest = updated[0][1]
assert not new_manifest.is_empty
run_info_1 = default_run_info.copy()
run_info_1.update({"debug": False, "os": "osx"})
run_info_2 = default_run_info.copy()
run_info_2.update({"debug": False, "os": "linux"})
assert new_manifest.get_test(test_id).children[0].get(
"expected", run_info_1) == "FAIL"
assert new_manifest.get_test(test_id).children[0].get(
"expected", {"debug": False, "os": "linux"}) == "TIMEOUT"
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_multiple_1():
tests = [("path/to/test.htm", [test_id], "testharness", """[test.htm]
[test1]
expected: FAIL""")]
log_0 = suite_log([("test_start", {"test": test_id}),
("test_status", {"test": test_id,
"subtest": "test1",
"status": "FAIL",
"expected": "FAIL"}),
("test_end", {"test": test_id,
"status": "OK"})],
run_info={"os": "osx"})
log_1 = suite_log([("test_start", {"test": test_id}),
("test_status", {"test": test_id,
"subtest": "test1",
"status": "TIMEOUT",
"expected": "FAIL"}),
("test_end", {"test": test_id,
"status": "OK"})],
run_info={"os": "linux"})
updated = update(tests, log_0, log_1)
new_manifest = updated[0][1]
assert not new_manifest.is_empty
run_info_1 = default_run_info.copy()
run_info_1.update({"os": "osx"})
run_info_2 = default_run_info.copy()
run_info_2.update({"os": "linux"})
run_info_3 = default_run_info.copy()
run_info_3.update({"os": "win"})
assert new_manifest.get_test(test_id).children[0].get(
"expected", run_info_1) == "FAIL"
assert new_manifest.get_test(test_id).children[0].get(
"expected", run_info_2) == "TIMEOUT"
assert new_manifest.get_test(test_id).children[0].get(
"expected", run_info_3) == "FAIL"
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_multiple_2():
tests = [("path/to/test.htm", [test_id], "testharness", """[test.htm]
[test1]
expected: FAIL""")]
log_0 = suite_log([("test_start", {"test": test_id}),
("test_status", {"test": test_id,
"subtest": "test1",
"status": "FAIL",
"expected": "FAIL"}),
("test_end", {"test": test_id,
"status": "OK"})],
run_info={"debug": False, "os": "osx"})
log_1 = suite_log([("test_start", {"test": test_id}),
("test_status", {"test": test_id,
"subtest": "test1",
"status": "TIMEOUT",
"expected": "FAIL"}),
("test_end", {"test": test_id,
"status": "OK"})],
run_info={"debug": True, "os": "osx"})
updated = update(tests, log_0, log_1)
new_manifest = updated[0][1]
run_info_1 = default_run_info.copy()
run_info_1.update({"debug": False, "os": "osx"})
run_info_2 = default_run_info.copy()
run_info_2.update({"debug": True, "os": "osx"})
assert not new_manifest.is_empty
assert new_manifest.get_test(test_id).children[0].get(
"expected", run_info_1) == "FAIL"
assert new_manifest.get_test(test_id).children[0].get(
"expected", run_info_2) == "TIMEOUT"
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_multiple_3():
tests = [("path/to/test.htm", [test_id], "testharness", """[test.htm]
[test1]
expected:
if debug: FAIL
if not debug and os == "osx": TIMEOUT""")]
log_0 = suite_log([("test_start", {"test": test_id}),
("test_status", {"test": test_id,
"subtest": "test1",
"status": "FAIL",
"expected": "FAIL"}),
("test_end", {"test": test_id,
"status": "OK"})],
run_info={"debug": False, "os": "osx"})
log_1 = suite_log([("test_start", {"test": test_id}),
("test_status", {"test": test_id,
"subtest": "test1",
"status": "TIMEOUT",
"expected": "FAIL"}),
("test_end", {"test": test_id,
"status": "OK"})],
run_info={"debug": True, "os": "osx"})
updated = update(tests, log_0, log_1)
new_manifest = updated[0][1]
run_info_1 = default_run_info.copy()
run_info_1.update({"debug": False, "os": "osx"})
run_info_2 = default_run_info.copy()
run_info_2.update({"debug": True, "os": "osx"})
assert not new_manifest.is_empty
assert new_manifest.get_test(test_id).children[0].get(
"expected", run_info_1) == "FAIL"
assert new_manifest.get_test(test_id).children[0].get(
"expected", run_info_2) == "TIMEOUT"
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_ignore_existing():
tests = [("path/to/test.htm", [test_id], "testharness", """[test.htm]
[test1]
expected:
if debug: TIMEOUT
if not debug and os == "osx": NOTRUN""")]
log_0 = suite_log([("test_start", {"test": test_id}),
("test_status", {"test": test_id,
"subtest": "test1",
"status": "FAIL",
"expected": "PASS"}),
("test_end", {"test": test_id,
"status": "OK"})],
run_info={"debug": False, "os": "linux"})
log_1 = suite_log([("test_start", {"test": test_id}),
("test_status", {"test": test_id,
"subtest": "test1",
"status": "FAIL",
"expected": "PASS"}),
("test_end", {"test": test_id,
"status": "OK"})],
run_info={"debug": True, "os": "windows"})
updated = update(tests, log_0, log_1)
new_manifest = updated[0][1]
run_info_1 = default_run_info.copy()
run_info_1.update({"debug": False, "os": "linux"})
run_info_2 = default_run_info.copy()
run_info_2.update({"debug": False, "os": "osx"})
assert not new_manifest.is_empty
assert new_manifest.get_test(test_id).children[0].get(
"expected", run_info_1) == "FAIL"
assert new_manifest.get_test(test_id).children[0].get(
"expected", run_info_2) == "NOTRUN"
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_new_test():
tests = [("path/to/test.htm", [test_id], "testharness", None)]
log_0 = suite_log([("test_start", {"test": test_id}),
("test_status", {"test": test_id,
"subtest": "test1",
"status": "FAIL",
"expected": "PASS"}),
("test_end", {"test": test_id,
"status": "OK"})])
updated = update(tests, log_0)
new_manifest = updated[0][1]
run_info_1 = default_run_info.copy()
assert not new_manifest.is_empty
assert new_manifest.get_test("test.htm") is None
assert len(new_manifest.get_test(test_id).children) == 1
assert new_manifest.get_test(test_id).children[0].get(
"expected", run_info_1) == "FAIL"
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_duplicate():
tests = [("path/to/test.htm", [test_id], "testharness", """
[test.htm]
expected: ERROR""")]
log_0 = suite_log([("test_start", {"test": test_id}),
("test_end", {"test": test_id,
"status": "PASS"})])
log_1 = suite_log([("test_start", {"test": test_id}),
("test_end", {"test": test_id,
"status": "FAIL"})])
updated = update(tests, log_0, log_1)
new_manifest = updated[0][1]
run_info_1 = default_run_info.copy()
assert new_manifest.get_test(test_id).get(
"expected", run_info_1) == "ERROR"
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_disable_intermittent():
tests = [("path/to/test.htm", [test_id], "testharness", """
[test.htm]
expected: ERROR""")]
log_0 = suite_log([("test_start", {"test": test_id}),
("test_end", {"test": test_id,
"status": "PASS"})])
log_1 = suite_log([("test_start", {"test": test_id}),
("test_end", {"test": test_id,
"status": "FAIL"})])
updated = update(tests, log_0, log_1, disable_intermittent="Some message")
new_manifest = updated[0][1]
run_info_1 = default_run_info.copy()
assert new_manifest.get_test(test_id).get(
"disabled", run_info_1) == "Some message"
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_stability_conditional_instability():
tests = [("path/to/test.htm", [test_id], "testharness", """
[test.htm]
expected: ERROR""")]
log_0 = suite_log([("test_start", {"test": test_id}),
("test_end", {"test": test_id,
"status": "PASS"})],
run_info={"os": "linux"})
log_1 = suite_log([("test_start", {"test": test_id}),
("test_end", {"test": test_id,
"status": "FAIL"})],
run_info={"os": "linux"})
log_2 = suite_log([("test_start", {"test": test_id}),
("test_end", {"test": test_id,
"status": "FAIL"})],
run_info={"os": "mac"})
updated = update(tests, log_0, log_1, log_2, disable_intermittent="Some message")
new_manifest = updated[0][1]
run_info_1 = default_run_info.copy()
run_info_1.update({"os": "linux"})
run_info_2 = default_run_info.copy()
run_info_2.update({"os": "mac"})
assert new_manifest.get_test(test_id).get(
"disabled", run_info_1) == "Some message"
with pytest.raises(KeyError):
assert new_manifest.get_test(test_id).get(
"disabled", run_info_2)
assert new_manifest.get_test(test_id).get(
"expected", run_info_2) == "FAIL"
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_full():
tests = [("path/to/test.htm", [test_id], "testharness", """[test.htm]
[test1]
expected:
if debug: TIMEOUT
if not debug and os == "osx": NOTRUN
[test2]
expected: FAIL
[test.js]
[test1]
expected: FAIL
""")]
log_0 = suite_log([("test_start", {"test": test_id}),
("test_status", {"test": test_id,
"subtest": "test1",
"status": "FAIL",
"expected": "PASS"}),
("test_end", {"test": test_id,
"status": "OK"})],
run_info={"debug": False})
log_1 = suite_log([("test_start", {"test": test_id}),
("test_status", {"test": test_id,
"subtest": "test1",
"status": "ERROR",
"expected": "PASS"}),
("test_end", {"test": test_id,
"status": "OK"})],
run_info={"debug": True})
updated = update(tests, log_0, log_1, full_update=True)
new_manifest = updated[0][1]
run_info_1 = default_run_info.copy()
run_info_1.update({"debug": False, "os": "win"})
run_info_2 = default_run_info.copy()
run_info_2.update({"debug": True, "os": "osx"})
assert not new_manifest.is_empty
assert new_manifest.get_test("test.js") is None
assert len(new_manifest.get_test(test_id).children) == 1
assert new_manifest.get_test(test_id).children[0].get(
"expected", run_info_1) == "FAIL"
assert new_manifest.get_test(test_id).children[0].get(
"expected", run_info_2) == "ERROR"
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_full_unknown():
tests = [("path/to/test.htm", [test_id], "testharness", """[test.htm]
[test1]
expected:
if release_or_beta: ERROR
if not debug and os == "osx": NOTRUN
""")]
log_0 = suite_log([("test_start", {"test": test_id}),
("test_status", {"test": test_id,
"subtest": "test1",
"status": "FAIL",
"expected": "PASS"}),
("test_end", {"test": test_id,
"status": "OK"})],
run_info={"debug": False, "release_or_beta": False})
log_1 = suite_log([("test_start", {"test": test_id}),
("test_status", {"test": test_id,
"subtest": "test1",
"status": "FAIL",
"expected": "PASS"}),
("test_end", {"test": test_id,
"status": "OK"})],
run_info={"debug": True, "release_or_beta": False})
updated = update(tests, log_0, log_1, full_update=True)
new_manifest = updated[0][1]
run_info_1 = default_run_info.copy()
run_info_1.update({"release_or_beta": False})
run_info_2 = default_run_info.copy()
run_info_2.update({"release_or_beta": True})
assert not new_manifest.is_empty
assert new_manifest.get_test(test_id).children[0].get(
"expected", run_info_1) == "FAIL"
assert new_manifest.get_test(test_id).children[0].get(
"expected", run_info_2) == "ERROR"
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_default():
tests = [("path/to/test.htm", [test_id], "testharness", """[test.htm]
[test1]
expected:
if os == "mac": FAIL
ERROR""")]
log_0 = suite_log([("test_start", {"test": test_id}),
("test_status", {"test": test_id,
"subtest": "test1",
"status": "PASS",
"expected": "FAIL"}),
("test_end", {"test": test_id,
"status": "OK"})],
run_info={"os": "mac"})
log_1 = suite_log([("test_start", {"test": test_id}),
("test_status", {"test": test_id,
"subtest": "test1",
"status": "PASS",
"expected": "ERROR"}),
("test_end", {"test": test_id,
"status": "OK"})],
run_info={"os": "linux"})
updated = update(tests, log_0, log_1)
new_manifest = updated[0][1]
assert new_manifest.is_empty
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_default_1():
tests = [("path/to/test.htm", [test_id], "testharness", """
[test.htm]
expected:
if os == "mac": TIMEOUT
ERROR""")]
log_0 = suite_log([("test_start", {"test": test_id}),
("test_end", {"test": test_id,
"expected": "ERROR",
"status": "FAIL"})],
run_info={"os": "linux"})
updated = update(tests, log_0)
new_manifest = updated[0][1]
assert not new_manifest.is_empty
run_info_1 = default_run_info.copy()
run_info_1.update({"os": "mac"})
run_info_2 = default_run_info.copy()
run_info_2.update({"os": "win"})
assert not new_manifest.is_empty
assert new_manifest.get_test(test_id).get(
"expected", run_info_1) == "TIMEOUT"
assert new_manifest.get_test(test_id).get(
"expected", run_info_2) == "FAIL"
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_default_2():
tests = [("path/to/test.htm", [test_id], "testharness", """
[test.htm]
expected:
if os == "mac": TIMEOUT
ERROR""")]
log_0 = suite_log([("test_start", {"test": test_id}),
("test_end", {"test": test_id,
"expected": "ERROR",
"status": "TIMEOUT"})],
run_info={"os": "linux"})
updated = update(tests, log_0)
new_manifest = updated[0][1]
assert not new_manifest.is_empty
run_info_1 = default_run_info.copy()
run_info_1.update({"os": "mac"})
run_info_2 = default_run_info.copy()
run_info_2.update({"os": "win"})
assert not new_manifest.is_empty
assert new_manifest.get_test(test_id).get(
"expected", run_info_1) == "TIMEOUT"
assert new_manifest.get_test(test_id).get(
"expected", run_info_2) == "TIMEOUT"
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_assertion_count_0():
tests = [("path/to/test.htm", [test_id], "testharness", """[test.htm]
max-asserts: 4
min-asserts: 2
""")]
log_0 = suite_log([("test_start", {"test": test_id}),
("assertion_count", {"test": test_id,
"count": 6,
"min_expected": 2,
"max_expected": 4}),
("test_end", {"test": test_id,
"status": "OK"})])
updated = update(tests, log_0)
new_manifest = updated[0][1]
assert not new_manifest.is_empty
assert new_manifest.get_test(test_id).get("max-asserts") == "7"
assert new_manifest.get_test(test_id).get("min-asserts") == "2"
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_assertion_count_1():
tests = [("path/to/test.htm", [test_id], "testharness", """[test.htm]
max-asserts: 4
min-asserts: 2
""")]
log_0 = suite_log([("test_start", {"test": test_id}),
("assertion_count", {"test": test_id,
"count": 1,
"min_expected": 2,
"max_expected": 4}),
("test_end", {"test": test_id,
"status": "OK"})])
updated = update(tests, log_0)
new_manifest = updated[0][1]
assert not new_manifest.is_empty
assert new_manifest.get_test(test_id).get("max-asserts") == "4"
assert new_manifest.get_test(test_id).has_key("min-asserts") is False
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_assertion_count_2():
tests = [("path/to/test.htm", [test_id], "testharness", """[test.htm]
max-asserts: 4
min-asserts: 2
""")]
log_0 = suite_log([("test_start", {"test": test_id}),
("assertion_count", {"test": test_id,
"count": 3,
"min_expected": 2,
"max_expected": 4}),
("test_end", {"test": test_id,
"status": "OK"})])
updated = update(tests, log_0)
assert not updated
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_assertion_count_3():
tests = [("path/to/test.htm", [test_id], "testharness", """[test.htm]
max-asserts: 4
min-asserts: 2
""")]
log_0 = suite_log([("test_start", {"test": test_id}),
("assertion_count", {"test": test_id,
"count": 6,
"min_expected": 2,
"max_expected": 4}),
("test_end", {"test": test_id,
"status": "OK"})],
run_info={"os": "windows"})
log_1 = suite_log([("test_start", {"test": test_id}),
("assertion_count", {"test": test_id,
"count": 7,
"min_expected": 2,
"max_expected": 4}),
("test_end", {"test": test_id,
"status": "OK"})],
run_info={"os": "linux"})
updated = update(tests, log_0, log_1)
new_manifest = updated[0][1]
assert not new_manifest.is_empty
assert new_manifest.get_test(test_id).get("max-asserts") == "8"
assert new_manifest.get_test(test_id).get("min-asserts") == "2"
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_assertion_count_4():
tests = [("path/to/test.htm", [test_id], "testharness", """[test.htm]""")]
log_0 = suite_log([("test_start", {"test": test_id}),
("assertion_count", {"test": test_id,
"count": 6,
"min_expected": 0,
"max_expected": 0}),
("test_end", {"test": test_id,
"status": "OK"})],
run_info={"os": "windows"})
log_1 = suite_log([("test_start", {"test": test_id}),
("assertion_count", {"test": test_id,
"count": 7,
"min_expected": 0,
"max_expected": 0}),
("test_end", {"test": test_id,
"status": "OK"})],
run_info={"os": "linux"})
updated = update(tests, log_0, log_1)
new_manifest = updated[0][1]
assert not new_manifest.is_empty
assert new_manifest.get_test(test_id).get("max-asserts") == "8"
assert new_manifest.get_test(test_id).has_key("min-asserts") is False
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_lsan_0():
tests = [("path/to/test.htm", [test_id], "testharness", ""),
("path/to/__dir__", [dir_id], None, "")]
log_0 = suite_log([("lsan_leak", {"scope": "path/to/",
"frames": ["foo", "bar"]})])
updated = update(tests, log_0)
new_manifest = updated[0][1]
assert not new_manifest.is_empty
assert new_manifest.get("lsan-allowed") == ["foo"]
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_lsan_1():
tests = [("path/to/test.htm", [test_id], "testharness", ""),
("path/to/__dir__", [dir_id], None, """
lsan-allowed: [foo]""")]
log_0 = suite_log([("lsan_leak", {"scope": "path/to/",
"frames": ["foo", "bar"]}),
("lsan_leak", {"scope": "path/to/",
"frames": ["baz", "foobar"]})])
updated = update(tests, log_0)
new_manifest = updated[0][1]
assert not new_manifest.is_empty
assert new_manifest.get("lsan-allowed") == ["baz", "foo"]
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_lsan_2():
tests = [("path/to/test.htm", [test_id], "testharness", ""),
("path/__dir__", ["path/__dir__"], None, """
lsan-allowed: [foo]"""),
("path/to/__dir__", [dir_id], None, "")]
log_0 = suite_log([("lsan_leak", {"scope": "path/to/",
"frames": ["foo", "bar"],
"allowed_match": ["foo"]}),
("lsan_leak", {"scope": "path/to/",
"frames": ["baz", "foobar"]})])
updated = update(tests, log_0)
new_manifest = updated[0][1]
assert not new_manifest.is_empty
assert new_manifest.get("lsan-allowed") == ["baz"]
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_lsan_3():
tests = [("path/to/test.htm", [test_id], "testharness", ""),
("path/to/__dir__", [dir_id], None, "")]
log_0 = suite_log([("lsan_leak", {"scope": "path/to/",
"frames": ["foo", "bar"]})],
run_info={"os": "win"})
log_1 = suite_log([("lsan_leak", {"scope": "path/to/",
"frames": ["baz", "foobar"]})],
run_info={"os": "linux"})
updated = update(tests, log_0, log_1)
new_manifest = updated[0][1]
assert not new_manifest.is_empty
assert new_manifest.get("lsan-allowed") == ["baz", "foo"]
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_wptreport_0():
tests = [("path/to/test.htm", [test_id], "testharness",
"""[test.htm]
[test1]
expected: FAIL""")]
log = {"run_info": default_run_info.copy(),
"results": [
{"test": "/path/to/test.htm",
"subtests": [{"name": "test1",
"status": "PASS",
"expected": "FAIL"}],
"status": "OK"}]}
updated = update(tests, log)
assert len(updated) == 1
assert updated[0][1].is_empty
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_wptreport_1():
tests = [("path/to/test.htm", [test_id], "testharness", ""),
("path/to/__dir__", [dir_id], None, "")]
log = {"run_info": default_run_info.copy(),
"results": [],
"lsan_leaks": [{"scope": "path/to/",
"frames": ["baz", "foobar"]}]}
updated = update(tests, log)
assert len(updated) == 1
assert updated[0][1].get("lsan-allowed") == ["baz"]
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_leak_total_0():
tests = [("path/to/test.htm", [test_id], "testharness", ""),
("path/to/__dir__", [dir_id], None, "")]
log_0 = suite_log([("mozleak_total", {"scope": "path/to/",
"process": "default",
"bytes": 100,
"threshold": 0,
"objects": []})])
updated = update(tests, log_0)
new_manifest = updated[0][1]
assert not new_manifest.is_empty
assert new_manifest.get("leak-threshold") == ['default:51200']
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_leak_total_1():
tests = [("path/to/test.htm", [test_id], "testharness", ""),
("path/to/__dir__", [dir_id], None, "")]
log_0 = suite_log([("mozleak_total", {"scope": "path/to/",
"process": "default",
"bytes": 100,
"threshold": 1000,
"objects": []})])
updated = update(tests, log_0)
assert not updated
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_leak_total_2():
tests = [("path/to/test.htm", [test_id], "testharness", ""),
("path/to/__dir__", [dir_id], None, """
leak-total: 110""")]
log_0 = suite_log([("mozleak_total", {"scope": "path/to/",
"process": "default",
"bytes": 100,
"threshold": 110,
"objects": []})])
updated = update(tests, log_0)
assert not updated
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_leak_total_3():
tests = [("path/to/test.htm", [test_id], "testharness", ""),
("path/to/__dir__", [dir_id], None, """
leak-total: 100""")]
log_0 = suite_log([("mozleak_total", {"scope": "path/to/",
"process": "default",
"bytes": 1000,
"threshold": 100,
"objects": []})])
updated = update(tests, log_0)
new_manifest = updated[0][1]
assert not new_manifest.is_empty
assert new_manifest.get("leak-threshold") == ['default:51200']
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_leak_total_4():
tests = [("path/to/test.htm", [test_id], "testharness", ""),
("path/to/__dir__", [dir_id], None, """
leak-total: 110""")]
log_0 = suite_log([
("lsan_leak", {"scope": "path/to/",
"frames": ["foo", "bar"]}),
("mozleak_total", {"scope": "path/to/",
"process": "default",
"bytes": 100,
"threshold": 110,
"objects": []})])
updated = update(tests, log_0)
new_manifest = updated[0][1]
assert not new_manifest.is_empty
assert new_manifest.has_key("leak-threshold") is False
class TestStep(Step):
def create(self, state):
tests = [("path/to/test.htm", [test_id], "testharness", "")]
state.foo = create_test_manifest(tests)
class UpdateRunner(StepRunner):
steps = [TestStep]
@pytest.mark.xfail(sys.version[0] == "3",
reason="update.state doesn't support py3")
def test_update_pickle():
logger = structuredlog.StructuredLogger("expected_test")
args = {
"test_paths": {
"/": {"tests_path": os.path.abspath(os.path.join(here,
os.pardir,
os.pardir,
os.pardir,
os.pardir))},
},
"abort": False,
"continue": False,
"sync": False,
}
args2 = args.copy()
args2["abort"] = True
wptupdate = WPTUpdate(logger, **args2)
wptupdate = WPTUpdate(logger, runner_cls=UpdateRunner, **args)
wptupdate.run()
| mpl-2.0 |
funkaoshi/django-guardian | guardian/tests/admin_test.py | 13 | 16724 | import copy
from django import forms
from django.conf import settings
from django.contrib import admin
from django.contrib.admin.models import LogEntry
from django.contrib.auth.models import User, Group
from django.contrib.contenttypes.models import ContentType
from django.core.urlresolvers import reverse
from django.http import HttpRequest
from django.test import TestCase
from django.test.client import Client
from guardian.admin import GuardedModelAdmin
from guardian.shortcuts import get_perms
from guardian.shortcuts import get_perms_for_model
class ContentTypeGuardedAdmin(GuardedModelAdmin):
pass
admin.site.register(ContentType, ContentTypeGuardedAdmin)
class AdminTests(TestCase):
def setUp(self):
self.admin = User.objects.create_superuser('admin', '[email protected]',
'admin')
self.user = User.objects.create_user('joe', '[email protected]', 'joe')
self.group = Group.objects.create(name='group')
self.client = Client()
self.obj = ContentType.objects.create(name='foo', model='bar',
app_label='fake-for-guardian-tests')
self.obj_info = self.obj._meta.app_label, self.obj._meta.module_name
def tearDown(self):
self.client.logout()
def _login_superuser(self):
self.client.login(username='admin', password='admin')
def test_view_manage_wrong_obj(self):
self._login_superuser()
url = reverse('admin:%s_%s_permissions_manage_user' % self.obj_info,
kwargs={'object_pk': -10, 'user_id': self.user.id})
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
def test_view(self):
self._login_superuser()
url = reverse('admin:%s_%s_permissions' % self.obj_info,
args=[self.obj.pk])
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['object'], self.obj)
def test_view_manage_wrong_user(self):
self._login_superuser()
url = reverse('admin:%s_%s_permissions_manage_user' % self.obj_info,
kwargs={'object_pk': self.obj.pk, 'user_id': -10})
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
def test_view_manage_user_form(self):
self._login_superuser()
url = reverse('admin:%s_%s_permissions' % self.obj_info,
args=[self.obj.pk])
data = {'user': self.user.username, 'submit_manage_user': 'submit'}
response = self.client.post(url, data, follow=True)
self.assertEqual(len(response.redirect_chain), 1)
self.assertEqual(response.redirect_chain[0][1], 302)
redirect_url = reverse('admin:%s_%s_permissions_manage_user' %
self.obj_info, kwargs={'object_pk': self.obj.pk,
'user_id': self.user.id})
self.assertEqual(response.request['PATH_INFO'], redirect_url)
def test_view_manage_negative_user_form(self):
self._login_superuser()
url = reverse('admin:%s_%s_permissions' % self.obj_info,
args=[self.obj.pk])
self.user = User.objects.create(username='negative_id_user', id=-2010)
data = {'user': self.user.username, 'submit_manage_user': 'submit'}
response = self.client.post(url, data, follow=True)
self.assertEqual(len(response.redirect_chain), 1)
self.assertEqual(response.redirect_chain[0][1], 302)
redirect_url = reverse('admin:%s_%s_permissions_manage_user' %
self.obj_info, args=[self.obj.pk, self.user.id])
self.assertEqual(response.request['PATH_INFO'], redirect_url)
def test_view_manage_user_form_wrong_user(self):
self._login_superuser()
url = reverse('admin:%s_%s_permissions' % self.obj_info,
args=[self.obj.pk])
data = {'user': 'wrong-user', 'submit_manage_user': 'submit'}
response = self.client.post(url, data, follow=True)
self.assertEqual(len(response.redirect_chain), 0)
self.assertEqual(response.status_code, 200)
self.assertTrue('user' in response.context['user_form'].errors)
def test_view_manage_user_form_wrong_field(self):
self._login_superuser()
url = reverse('admin:%s_%s_permissions' % self.obj_info,
args=[self.obj.pk])
data = {'user': '<xss>', 'submit_manage_user': 'submit'}
response = self.client.post(url, data, follow=True)
self.assertEqual(len(response.redirect_chain), 0)
self.assertEqual(response.status_code, 200)
self.assertTrue('user' in response.context['user_form'].errors)
def test_view_manage_user_form_empty_user(self):
self._login_superuser()
url = reverse('admin:%s_%s_permissions' % self.obj_info,
args=[self.obj.pk])
data = {'user': '', 'submit_manage_user': 'submit'}
response = self.client.post(url, data, follow=True)
self.assertEqual(len(response.redirect_chain), 0)
self.assertEqual(response.status_code, 200)
self.assertTrue('user' in response.context['user_form'].errors)
def test_view_manage_user_wrong_perms(self):
self._login_superuser()
url = reverse('admin:%s_%s_permissions_manage_user' % self.obj_info,
args=[self.obj.pk, self.user.id])
        perms = ['change_user']  # This permission is not related to self.obj
data = {'permissions': perms}
response = self.client.post(url, data, follow=True)
self.assertEqual(response.status_code, 200)
self.assertTrue('permissions' in response.context['form'].errors)
def test_view_manage_user(self):
self._login_superuser()
url = reverse('admin:%s_%s_permissions_manage_user' % self.obj_info,
args=[self.obj.pk, self.user.id])
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
choices = set([c[0] for c in
response.context['form'].fields['permissions'].choices])
self.assertEqual(
set([ p.codename for p in get_perms_for_model(self.obj)]),
choices,
)
# Add some perms and check if changes were persisted
perms = ['change_%s' % self.obj_info[1], 'delete_%s' % self.obj_info[1]]
data = {'permissions': perms}
response = self.client.post(url, data, follow=True)
self.assertEqual(len(response.redirect_chain), 1)
self.assertEqual(response.redirect_chain[0][1], 302)
self.assertEqual(
set(get_perms(self.user, self.obj)),
set(perms),
)
# Remove perm and check if change was persisted
perms = ['change_%s' % self.obj_info[1]]
data = {'permissions': perms}
response = self.client.post(url, data, follow=True)
self.assertEqual(len(response.redirect_chain), 1)
self.assertEqual(response.redirect_chain[0][1], 302)
self.assertEqual(
set(get_perms(self.user, self.obj)),
set(perms),
)
def test_view_manage_group_form(self):
self._login_superuser()
url = reverse('admin:%s_%s_permissions' % self.obj_info,
args=[self.obj.pk])
data = {'group': self.group.name, 'submit_manage_group': 'submit'}
response = self.client.post(url, data, follow=True)
self.assertEqual(len(response.redirect_chain), 1)
self.assertEqual(response.redirect_chain[0][1], 302)
redirect_url = reverse('admin:%s_%s_permissions_manage_group' %
self.obj_info, args=[self.obj.pk, self.group.id])
self.assertEqual(response.request['PATH_INFO'], redirect_url)
def test_view_manage_negative_group_form(self):
self._login_superuser()
url = reverse('admin:%s_%s_permissions' % self.obj_info,
args=[self.obj.pk])
        self.group = Group.objects.create(name='negative_id_group', id=-2010)
data = {'group': self.group.name, 'submit_manage_group': 'submit'}
response = self.client.post(url, data, follow=True)
self.assertEqual(len(response.redirect_chain), 1)
self.assertEqual(response.redirect_chain[0][1], 302)
redirect_url = reverse('admin:%s_%s_permissions_manage_group' %
self.obj_info, args=[self.obj.pk, self.group.id])
self.assertEqual(response.request['PATH_INFO'], redirect_url)
def test_view_manage_group_form_wrong_group(self):
self._login_superuser()
url = reverse('admin:%s_%s_permissions' % self.obj_info,
args=[self.obj.pk])
data = {'group': 'wrong-group', 'submit_manage_group': 'submit'}
response = self.client.post(url, data, follow=True)
self.assertEqual(len(response.redirect_chain), 0)
self.assertEqual(response.status_code, 200)
self.assertTrue('group' in response.context['group_form'].errors)
def test_view_manage_group_form_wrong_field(self):
self._login_superuser()
url = reverse('admin:%s_%s_permissions' % self.obj_info,
args=[self.obj.pk])
data = {'group': '<xss>', 'submit_manage_group': 'submit'}
response = self.client.post(url, data, follow=True)
self.assertEqual(len(response.redirect_chain), 0)
self.assertEqual(response.status_code, 200)
self.assertTrue('group' in response.context['group_form'].errors)
def test_view_manage_group_form_empty_group(self):
self._login_superuser()
url = reverse('admin:%s_%s_permissions' % self.obj_info,
args=[self.obj.pk])
data = {'group': '', 'submit_manage_group': 'submit'}
response = self.client.post(url, data, follow=True)
self.assertEqual(len(response.redirect_chain), 0)
self.assertEqual(response.status_code, 200)
self.assertTrue('group' in response.context['group_form'].errors)
def test_view_manage_group_wrong_perms(self):
self._login_superuser()
url = reverse('admin:%s_%s_permissions_manage_group' %
self.obj_info, args=[self.obj.pk, self.group.id])
        perms = ['change_user']  # This permission is not related to self.obj
data = {'permissions': perms}
response = self.client.post(url, data, follow=True)
self.assertEqual(response.status_code, 200)
self.assertTrue('permissions' in response.context['form'].errors)
def test_view_manage_group(self):
self._login_superuser()
url = reverse('admin:%s_%s_permissions_manage_group' %
self.obj_info, args=[self.obj.pk, self.group.id])
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
choices = set([c[0] for c in
response.context['form'].fields['permissions'].choices])
self.assertEqual(
set([ p.codename for p in get_perms_for_model(self.obj)]),
choices,
)
# Add some perms and check if changes were persisted
perms = ['change_%s' % self.obj_info[1], 'delete_%s' % self.obj_info[1]]
data = {'permissions': perms}
response = self.client.post(url, data, follow=True)
self.assertEqual(len(response.redirect_chain), 1)
self.assertEqual(response.redirect_chain[0][1], 302)
self.assertEqual(
set(get_perms(self.group, self.obj)),
set(perms),
)
# Remove perm and check if change was persisted
perms = ['delete_%s' % self.obj_info[1]]
data = {'permissions': perms}
response = self.client.post(url, data, follow=True)
self.assertEqual(len(response.redirect_chain), 1)
self.assertEqual(response.redirect_chain[0][1], 302)
self.assertEqual(
set(get_perms(self.group, self.obj)),
set(perms),
)
if 'django.contrib.admin' not in settings.INSTALLED_APPS:
# Skip admin tests if admin app is not registered
    # we simply clean up the AdminTests class ...
AdminTests = type('AdminTests', (TestCase,), {})
class GuardedModelAdminTests(TestCase):
def _get_gma(self, attrs=None, name=None, model=None):
"""
Returns ``GuardedModelAdmin`` instance.
"""
attrs = attrs or {}
name = name or 'GMA'
model = model or User
GMA = type(name, (GuardedModelAdmin,), attrs)
gma = GMA(model, admin.site)
return gma
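    # type() builds a throw-away GuardedModelAdmin subclass for each test, so
    # tests can vary class attributes without declaring module-level classes.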
def test_obj_perms_manage_template_attr(self):
attrs = {'obj_perms_manage_template': 'foobar.html'}
gma = self._get_gma(attrs=attrs)
        self.assertEqual(gma.get_obj_perms_manage_template(), 'foobar.html')
def test_obj_perms_manage_user_template_attr(self):
attrs = {'obj_perms_manage_user_template': 'foobar.html'}
gma = self._get_gma(attrs=attrs)
        self.assertEqual(gma.get_obj_perms_manage_user_template(), 'foobar.html')
def test_obj_perms_manage_user_form_attr(self):
attrs = {'obj_perms_manage_user_form': forms.Form}
gma = self._get_gma(attrs=attrs)
        self.assertEqual(gma.get_obj_perms_manage_user_form(), forms.Form)
def test_obj_perms_manage_group_template_attr(self):
attrs = {'obj_perms_manage_group_template': 'foobar.html'}
gma = self._get_gma(attrs=attrs)
        self.assertEqual(gma.get_obj_perms_manage_group_template(),
                         'foobar.html')
def test_obj_perms_manage_group_form_attr(self):
attrs = {'obj_perms_manage_group_form': forms.Form}
gma = self._get_gma(attrs=attrs)
        self.assertEqual(gma.get_obj_perms_manage_group_form(), forms.Form)
    def test_user_can_access_owned_objects_only(self):
attrs = {
'user_can_access_owned_objects_only': True,
'user_owned_objects_field': 'user',
}
gma = self._get_gma(attrs=attrs, model=LogEntry)
joe = User.objects.create_user('joe', '[email protected]', 'joe')
jane = User.objects.create_user('jane', '[email protected]', 'jane')
ctype = ContentType.objects.get_for_model(User)
joe_entry = LogEntry.objects.create(user=joe, content_type=ctype,
object_id=joe.id, action_flag=1, change_message='foo')
LogEntry.objects.create(user=jane, content_type=ctype,
object_id=jane.id, action_flag=1, change_message='bar')
request = HttpRequest()
request.user = joe
qs = gma.queryset(request)
self.assertEqual([e.pk for e in qs], [joe_entry.pk])
    def test_user_can_access_owned_objects_only_unless_superuser(self):
attrs = {
'user_can_access_owned_objects_only': True,
'user_owned_objects_field': 'user',
}
gma = self._get_gma(attrs=attrs, model=LogEntry)
joe = User.objects.create_superuser('joe', '[email protected]', 'joe')
jane = User.objects.create_user('jane', '[email protected]', 'jane')
ctype = ContentType.objects.get_for_model(User)
joe_entry = LogEntry.objects.create(user=joe, content_type=ctype,
object_id=joe.id, action_flag=1, change_message='foo')
jane_entry = LogEntry.objects.create(user=jane, content_type=ctype,
object_id=jane.id, action_flag=1, change_message='bar')
request = HttpRequest()
request.user = joe
qs = gma.queryset(request)
self.assertEqual(sorted([e.pk for e in qs]),
sorted([joe_entry.pk, jane_entry.pk]))
class GrappelliGuardedModelAdminTests(TestCase):
org_installed_apps = copy.copy(settings.INSTALLED_APPS)
def _get_gma(self, attrs=None, name=None, model=None):
"""
Returns ``GuardedModelAdmin`` instance.
"""
attrs = attrs or {}
name = name or 'GMA'
model = model or User
GMA = type(name, (GuardedModelAdmin,), attrs)
gma = GMA(model, admin.site)
return gma
def setUp(self):
settings.INSTALLED_APPS = ['grappelli'] + list(settings.INSTALLED_APPS)
def tearDown(self):
settings.INSTALLED_APPS = self.org_installed_apps
def test_get_obj_perms_manage_template(self):
gma = self._get_gma()
self.assertEqual(gma.get_obj_perms_manage_template(),
'admin/guardian/contrib/grappelli/obj_perms_manage.html')
def test_get_obj_perms_manage_user_template(self):
gma = self._get_gma()
self.assertEqual(gma.get_obj_perms_manage_user_template(),
'admin/guardian/contrib/grappelli/obj_perms_manage_user.html')
def test_get_obj_perms_manage_group_template(self):
gma = self._get_gma()
self.assertEqual(gma.get_obj_perms_manage_group_template(),
'admin/guardian/contrib/grappelli/obj_perms_manage_group.html')
| bsd-2-clause |
mahak/ansible | lib/ansible/plugins/connection/winrm.py | 16 | 32540 | # (c) 2014, Chris Church <[email protected]>
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
author: Ansible Core Team
name: winrm
short_description: Run tasks over Microsoft's WinRM
description:
- Run commands or put/fetch on a target via WinRM
- This plugin allows extra arguments to be passed that are supported by the protocol but not explicitly defined here.
They should take the form of variables declared with the following pattern `ansible_winrm_<option>`.
version_added: "2.0"
extends_documentation_fragment:
- connection_pipelining
requirements:
- pywinrm (python library)
options:
# figure out more elegant 'delegation'
remote_addr:
description:
      - Address of the Windows machine
default: inventory_hostname
vars:
- name: ansible_host
- name: ansible_winrm_host
type: str
remote_user:
description:
      - The user to log in as on the Windows machine
vars:
- name: ansible_user
- name: ansible_winrm_user
type: str
remote_password:
description: Authentication password for the C(remote_user). Can be supplied as CLI option.
vars:
- name: ansible_password
- name: ansible_winrm_pass
- name: ansible_winrm_password
type: str
aliases:
- password # Needed for --ask-pass to come through on delegation
port:
description:
      - Port for WinRM to connect on at the remote target.
      - The default is the HTTPS port (5986); when using HTTP it should be 5985.
vars:
- name: ansible_port
- name: ansible_winrm_port
default: 5986
type: integer
scheme:
description:
- URI scheme to use
      - If not set, this will default to C(https), or to C(http) if I(port)
        is C(5985).
choices: [http, https]
vars:
- name: ansible_winrm_scheme
type: str
path:
description: URI path to connect to
default: '/wsman'
vars:
- name: ansible_winrm_path
type: str
transport:
description:
- List of winrm transports to attempt to use (ssl, plaintext, kerberos, etc)
- If None (the default) the plugin will try to automatically guess the correct list
- The choices available depend on your version of pywinrm
type: list
vars:
- name: ansible_winrm_transport
kerberos_command:
    description: Kerberos command to use to request an authentication ticket
default: kinit
vars:
- name: ansible_winrm_kinit_cmd
type: str
kinit_args:
description:
- Extra arguments to pass to C(kinit) when getting the Kerberos authentication ticket.
- By default no extra arguments are passed into C(kinit) unless I(ansible_winrm_kerberos_delegation) is also
set. In that case C(-f) is added to the C(kinit) args so a forwardable ticket is retrieved.
- If set, the args will overwrite any existing defaults for C(kinit), including C(-f) for a delegated ticket.
type: str
vars:
- name: ansible_winrm_kinit_args
version_added: '2.11'
kerberos_mode:
description:
      - Kerberos usage mode.
      - The managed option means Ansible will obtain a Kerberos ticket.
      - The manual option means a ticket must already have been obtained by the user.
- If having issues with Ansible freezing when trying to obtain the
Kerberos ticket, you can either set this to C(manual) and obtain
it outside Ansible or install C(pexpect) through pip and try
again.
choices: [managed, manual]
vars:
- name: ansible_winrm_kinit_mode
type: str
connection_timeout:
description:
- Sets the operation and read timeout settings for the WinRM
connection.
- Corresponds to the C(operation_timeout_sec) and
C(read_timeout_sec) args in pywinrm so avoid setting these vars
with this one.
- The default value is whatever is set in the installed version of
pywinrm.
vars:
- name: ansible_winrm_connection_timeout
type: int
"""
import base64
import logging
import os
import re
import traceback
import json
import tempfile
import shlex
import subprocess
HAVE_KERBEROS = False
try:
import kerberos
HAVE_KERBEROS = True
except ImportError:
pass
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleConnectionFailure
from ansible.errors import AnsibleFileNotFound
from ansible.module_utils.json_utils import _filter_non_json_lines
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.module_utils.six.moves.urllib.parse import urlunsplit
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.module_utils.six import binary_type, PY3
from ansible.plugins.connection import ConnectionBase
from ansible.plugins.shell.powershell import _parse_clixml
from ansible.utils.hashing import secure_hash
from ansible.utils.display import Display
# getargspec is deprecated in favour of getfullargspec in Python 3 but
# getfullargspec is not available in Python 2
if PY3:
from inspect import getfullargspec as getargspec
else:
from inspect import getargspec
try:
import winrm
from winrm import Response
from winrm.protocol import Protocol
import requests.exceptions
HAS_WINRM = True
except ImportError as e:
HAS_WINRM = False
WINRM_IMPORT_ERR = e
try:
import xmltodict
HAS_XMLTODICT = True
except ImportError as e:
HAS_XMLTODICT = False
XMLTODICT_IMPORT_ERR = e
HAS_PEXPECT = False
try:
import pexpect
# echo was added in pexpect 3.3+ which is newer than the RHEL package
# we can only use pexpect for kerb auth if echo is a valid kwarg
# https://github.com/ansible/ansible/issues/43462
if hasattr(pexpect, 'spawn'):
argspec = getargspec(pexpect.spawn.__init__)
if 'echo' in argspec.args:
HAS_PEXPECT = True
except ImportError as e:
pass
# used to try and parse the hostname and detect if IPv6 is being used
try:
import ipaddress
HAS_IPADDRESS = True
except ImportError:
HAS_IPADDRESS = False
display = Display()
class Connection(ConnectionBase):
'''WinRM connections over HTTP/HTTPS.'''
transport = 'winrm'
module_implementation_preferences = ('.ps1', '.exe', '')
allow_executable = False
has_pipelining = True
allow_extras = True
def __init__(self, *args, **kwargs):
self.always_pipeline_modules = True
self.has_native_async = True
self.protocol = None
self.shell_id = None
self.delegate = None
self._shell_type = 'powershell'
super(Connection, self).__init__(*args, **kwargs)
if not C.DEFAULT_DEBUG:
logging.getLogger('requests_credssp').setLevel(logging.INFO)
logging.getLogger('requests_kerberos').setLevel(logging.INFO)
logging.getLogger('urllib3').setLevel(logging.INFO)
def _build_winrm_kwargs(self):
        # this used to be in set_options; win_reboot needs to be able to
        # override the connection timeout, so we have to be able to build
        # the args after setting individual options. This is called by
        # _connect before starting the WinRM connection
self._winrm_host = self.get_option('remote_addr')
self._winrm_user = self.get_option('remote_user')
self._winrm_pass = self.get_option('remote_password')
self._winrm_port = self.get_option('port')
self._winrm_scheme = self.get_option('scheme')
# old behaviour, scheme should default to http if not set and the port
# is 5985 otherwise https
if self._winrm_scheme is None:
self._winrm_scheme = 'http' if self._winrm_port == 5985 else 'https'
self._winrm_path = self.get_option('path')
self._kinit_cmd = self.get_option('kerberos_command')
self._winrm_transport = self.get_option('transport')
self._winrm_connection_timeout = self.get_option('connection_timeout')
if hasattr(winrm, 'FEATURE_SUPPORTED_AUTHTYPES'):
self._winrm_supported_authtypes = set(winrm.FEATURE_SUPPORTED_AUTHTYPES)
else:
# for legacy versions of pywinrm, use the values we know are supported
self._winrm_supported_authtypes = set(['plaintext', 'ssl', 'kerberos'])
# calculate transport if needed
if self._winrm_transport is None or self._winrm_transport[0] is None:
# TODO: figure out what we want to do with auto-transport selection in the face of NTLM/Kerb/CredSSP/Cert/Basic
transport_selector = ['ssl'] if self._winrm_scheme == 'https' else ['plaintext']
            if HAVE_KERBEROS and self._winrm_user and '@' in self._winrm_user:
self._winrm_transport = ['kerberos'] + transport_selector
else:
self._winrm_transport = transport_selector
unsupported_transports = set(self._winrm_transport).difference(self._winrm_supported_authtypes)
if unsupported_transports:
raise AnsibleError('The installed version of WinRM does not support transport(s) %s' %
to_native(list(unsupported_transports), nonstring='simplerepr'))
# if kerberos is among our transports and there's a password specified, we're managing the tickets
kinit_mode = self.get_option('kerberos_mode')
if kinit_mode is None:
# HACK: ideally, remove multi-transport stuff
self._kerb_managed = "kerberos" in self._winrm_transport and (self._winrm_pass is not None and self._winrm_pass != "")
elif kinit_mode == "managed":
self._kerb_managed = True
elif kinit_mode == "manual":
self._kerb_managed = False
        # arg names we're going to pass directly
internal_kwarg_mask = set(['self', 'endpoint', 'transport', 'username', 'password', 'scheme', 'path', 'kinit_mode', 'kinit_cmd'])
self._winrm_kwargs = dict(username=self._winrm_user, password=self._winrm_pass)
argspec = getargspec(Protocol.__init__)
supported_winrm_args = set(argspec.args)
supported_winrm_args.update(internal_kwarg_mask)
passed_winrm_args = set([v.replace('ansible_winrm_', '') for v in self.get_option('_extras')])
unsupported_args = passed_winrm_args.difference(supported_winrm_args)
# warn for kwargs unsupported by the installed version of pywinrm
for arg in unsupported_args:
display.warning("ansible_winrm_{0} unsupported by pywinrm (is an up-to-date version of pywinrm installed?)".format(arg))
# pass through matching extras, excluding the list we want to treat specially
for arg in passed_winrm_args.difference(internal_kwarg_mask).intersection(supported_winrm_args):
self._winrm_kwargs[arg] = self.get_option('_extras')['ansible_winrm_%s' % arg]
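        # For example, an inventory variable such as
        # ansible_winrm_server_cert_validation=ignore arrives via '_extras'
        # and, assuming the installed pywinrm accepts that kwarg, ends up as
        # self._winrm_kwargs['server_cert_validation'] = 'ignore'.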
# Until pykerberos has enough goodies to implement a rudimentary kinit/klist, simplest way is to let each connection
# auth itself with a private CCACHE.
def _kerb_auth(self, principal, password):
if password is None:
password = ""
self._kerb_ccache = tempfile.NamedTemporaryFile()
display.vvvvv("creating Kerberos CC at %s" % self._kerb_ccache.name)
krb5ccname = "FILE:%s" % self._kerb_ccache.name
os.environ["KRB5CCNAME"] = krb5ccname
krb5env = dict(KRB5CCNAME=krb5ccname)
# Stores various flags to call with kinit, these could be explicit args set by 'ansible_winrm_kinit_args' OR
# '-f' if kerberos delegation is requested (ansible_winrm_kerberos_delegation).
kinit_cmdline = [self._kinit_cmd]
kinit_args = self.get_option('kinit_args')
if kinit_args:
kinit_args = [to_text(a) for a in shlex.split(kinit_args) if a.strip()]
kinit_cmdline.extend(kinit_args)
elif boolean(self.get_option('_extras').get('ansible_winrm_kerberos_delegation', False)):
kinit_cmdline.append('-f')
kinit_cmdline.append(principal)
# pexpect runs the process in its own pty so it can correctly send
# the password as input even on MacOS which blocks subprocess from
# doing so. Unfortunately it is not available on the built in Python
# so we can only use it if someone has installed it
if HAS_PEXPECT:
proc_mechanism = "pexpect"
command = kinit_cmdline.pop(0)
password = to_text(password, encoding='utf-8',
errors='surrogate_or_strict')
display.vvvv("calling kinit with pexpect for principal %s"
% principal)
try:
child = pexpect.spawn(command, kinit_cmdline, timeout=60,
env=krb5env, echo=False)
except pexpect.ExceptionPexpect as err:
err_msg = "Kerberos auth failure when calling kinit cmd " \
"'%s': %s" % (command, to_native(err))
raise AnsibleConnectionFailure(err_msg)
try:
child.expect(".*:")
child.sendline(password)
except OSError as err:
                # child exited before the password was sent; Ansible will
                # raise an error based on the rc below, just display it here
display.vvvv("kinit with pexpect raised OSError: %s"
% to_native(err))
# technically this is the stdout + stderr but to match the
# subprocess error checking behaviour, we will call it stderr
stderr = child.read()
child.wait()
rc = child.exitstatus
else:
proc_mechanism = "subprocess"
password = to_bytes(password, encoding='utf-8',
errors='surrogate_or_strict')
display.vvvv("calling kinit with subprocess for principal %s"
% principal)
try:
p = subprocess.Popen(kinit_cmdline, stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=krb5env)
except OSError as err:
err_msg = "Kerberos auth failure when calling kinit cmd " \
"'%s': %s" % (self._kinit_cmd, to_native(err))
raise AnsibleConnectionFailure(err_msg)
stdout, stderr = p.communicate(password + b'\n')
            rc = p.returncode
if rc != 0:
# one last attempt at making sure the password does not exist
# in the output
exp_msg = to_native(stderr.strip())
exp_msg = exp_msg.replace(to_native(password), "<redacted>")
err_msg = "Kerberos auth failure for principal %s with %s: %s" \
% (principal, proc_mechanism, exp_msg)
raise AnsibleConnectionFailure(err_msg)
display.vvvvv("kinit succeeded for principal %s" % principal)
def _winrm_connect(self):
'''
Establish a WinRM connection over HTTP/HTTPS.
'''
display.vvv("ESTABLISH WINRM CONNECTION FOR USER: %s on PORT %s TO %s" %
(self._winrm_user, self._winrm_port, self._winrm_host), host=self._winrm_host)
winrm_host = self._winrm_host
if HAS_IPADDRESS:
display.debug("checking if winrm_host %s is an IPv6 address" % winrm_host)
try:
ipaddress.IPv6Address(winrm_host)
except ipaddress.AddressValueError:
pass
else:
winrm_host = "[%s]" % winrm_host
netloc = '%s:%d' % (winrm_host, self._winrm_port)
endpoint = urlunsplit((self._winrm_scheme, netloc, self._winrm_path, '', ''))
errors = []
for transport in self._winrm_transport:
if transport == 'kerberos':
if not HAVE_KERBEROS:
errors.append('kerberos: the python kerberos library is not installed')
continue
if self._kerb_managed:
self._kerb_auth(self._winrm_user, self._winrm_pass)
display.vvvvv('WINRM CONNECT: transport=%s endpoint=%s' % (transport, endpoint), host=self._winrm_host)
try:
winrm_kwargs = self._winrm_kwargs.copy()
if self._winrm_connection_timeout:
winrm_kwargs['operation_timeout_sec'] = self._winrm_connection_timeout
winrm_kwargs['read_timeout_sec'] = self._winrm_connection_timeout + 1
protocol = Protocol(endpoint, transport=transport, **winrm_kwargs)
# open the shell from connect so we know we're able to talk to the server
if not self.shell_id:
self.shell_id = protocol.open_shell(codepage=65001) # UTF-8
display.vvvvv('WINRM OPEN SHELL: %s' % self.shell_id, host=self._winrm_host)
return protocol
except Exception as e:
err_msg = to_text(e).strip()
if re.search(to_text(r'Operation\s+?timed\s+?out'), err_msg, re.I):
raise AnsibleError('the connection attempt timed out')
m = re.search(to_text(r'Code\s+?(\d{3})'), err_msg)
if m:
code = int(m.groups()[0])
if code == 401:
err_msg = 'the specified credentials were rejected by the server'
elif code == 411:
return protocol
errors.append(u'%s: %s' % (transport, err_msg))
display.vvvvv(u'WINRM CONNECTION ERROR: %s\n%s' % (err_msg, to_text(traceback.format_exc())), host=self._winrm_host)
if errors:
raise AnsibleConnectionFailure(', '.join(map(to_native, errors)))
else:
raise AnsibleError('No transport found for WinRM connection')
def _winrm_send_input(self, protocol, shell_id, command_id, stdin, eof=False):
rq = {'env:Envelope': protocol._get_soap_header(
resource_uri='http://schemas.microsoft.com/wbem/wsman/1/windows/shell/cmd',
action='http://schemas.microsoft.com/wbem/wsman/1/windows/shell/Send',
shell_id=shell_id)}
stream = rq['env:Envelope'].setdefault('env:Body', {}).setdefault('rsp:Send', {})\
.setdefault('rsp:Stream', {})
stream['@Name'] = 'stdin'
stream['@CommandId'] = command_id
stream['#text'] = base64.b64encode(to_bytes(stdin))
if eof:
stream['@End'] = 'true'
protocol.send_message(xmltodict.unparse(rq))
def _winrm_exec(self, command, args=(), from_exec=False, stdin_iterator=None):
if not self.protocol:
self.protocol = self._winrm_connect()
self._connected = True
if from_exec:
display.vvvvv("WINRM EXEC %r %r" % (command, args), host=self._winrm_host)
else:
display.vvvvvv("WINRM EXEC %r %r" % (command, args), host=self._winrm_host)
command_id = None
try:
stdin_push_failed = False
command_id = self.protocol.run_command(self.shell_id, to_bytes(command), map(to_bytes, args), console_mode_stdin=(stdin_iterator is None))
try:
if stdin_iterator:
for (data, is_last) in stdin_iterator:
self._winrm_send_input(self.protocol, self.shell_id, command_id, data, eof=is_last)
except Exception as ex:
display.warning("ERROR DURING WINRM SEND INPUT - attempting to recover: %s %s"
% (type(ex).__name__, to_text(ex)))
display.debug(traceback.format_exc())
stdin_push_failed = True
# NB: this can hang if the receiver is still running (eg, network failed a Send request but the server's still happy).
# FUTURE: Consider adding pywinrm status check/abort operations to see if the target is still running after a failure.
resptuple = self.protocol.get_command_output(self.shell_id, command_id)
# ensure stdout/stderr are text for py3
# FUTURE: this should probably be done internally by pywinrm
response = Response(tuple(to_text(v) if isinstance(v, binary_type) else v for v in resptuple))
# TODO: check result from response and set stdin_push_failed if we have nonzero
if from_exec:
display.vvvvv('WINRM RESULT %r' % to_text(response), host=self._winrm_host)
else:
display.vvvvvv('WINRM RESULT %r' % to_text(response), host=self._winrm_host)
display.vvvvvv('WINRM STDOUT %s' % to_text(response.std_out), host=self._winrm_host)
display.vvvvvv('WINRM STDERR %s' % to_text(response.std_err), host=self._winrm_host)
if stdin_push_failed:
# There are cases where the stdin input failed but the WinRM service still processed it. We attempt to
# see if stdout contains a valid json return value so we can ignore this error
try:
filtered_output, dummy = _filter_non_json_lines(response.std_out)
json.loads(filtered_output)
except ValueError:
# stdout does not contain a return response, stdin input was a fatal error
stderr = to_bytes(response.std_err, encoding='utf-8')
if stderr.startswith(b"#< CLIXML"):
stderr = _parse_clixml(stderr)
raise AnsibleError('winrm send_input failed; \nstdout: %s\nstderr %s'
% (to_native(response.std_out), to_native(stderr)))
return response
except requests.exceptions.Timeout as exc:
raise AnsibleConnectionFailure('winrm connection error: %s' % to_native(exc))
finally:
if command_id:
self.protocol.cleanup_command(self.shell_id, command_id)
def _connect(self):
if not HAS_WINRM:
raise AnsibleError("winrm or requests is not installed: %s" % to_native(WINRM_IMPORT_ERR))
elif not HAS_XMLTODICT:
raise AnsibleError("xmltodict is not installed: %s" % to_native(XMLTODICT_IMPORT_ERR))
super(Connection, self)._connect()
if not self.protocol:
self._build_winrm_kwargs() # build the kwargs from the options set
self.protocol = self._winrm_connect()
self._connected = True
return self
def reset(self):
if not self._connected:
return
self.protocol = None
self.shell_id = None
self._connect()
def _wrapper_payload_stream(self, payload, buffer_size=200000):
payload_bytes = to_bytes(payload)
byte_count = len(payload_bytes)
for i in range(0, byte_count, buffer_size):
yield payload_bytes[i:i + buffer_size], i + buffer_size >= byte_count
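        # e.g. a 450000-byte payload yields (200000 bytes, False),
        # (200000 bytes, False) and finally (50000 bytes, True).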
def exec_command(self, cmd, in_data=None, sudoable=True):
super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
cmd_parts = self._shell._encode_script(cmd, as_list=True, strict_mode=False, preserve_rc=False)
# TODO: display something meaningful here
display.vvv("EXEC (via pipeline wrapper)")
stdin_iterator = None
if in_data:
stdin_iterator = self._wrapper_payload_stream(in_data)
result = self._winrm_exec(cmd_parts[0], cmd_parts[1:], from_exec=True, stdin_iterator=stdin_iterator)
result.std_out = to_bytes(result.std_out)
result.std_err = to_bytes(result.std_err)
# parse just stderr from CLIXML output
if result.std_err.startswith(b"#< CLIXML"):
try:
result.std_err = _parse_clixml(result.std_err)
except Exception:
# unsure if we're guaranteed a valid xml doc- use raw output in case of error
pass
return (result.status_code, result.std_out, result.std_err)
# FUTURE: determine buffer size at runtime via remote winrm config?
def _put_file_stdin_iterator(self, in_path, out_path, buffer_size=250000):
in_size = os.path.getsize(to_bytes(in_path, errors='surrogate_or_strict'))
offset = 0
with open(to_bytes(in_path, errors='surrogate_or_strict'), 'rb') as in_file:
for out_data in iter((lambda: in_file.read(buffer_size)), b''):
offset += len(out_data)
self._display.vvvvv('WINRM PUT "%s" to "%s" (offset=%d size=%d)' % (in_path, out_path, offset, len(out_data)), host=self._winrm_host)
# yes, we're double-encoding over the wire in this case- we want to ensure that the data shipped to the end PS pipeline is still b64-encoded
b64_data = base64.b64encode(out_data) + b'\r\n'
# cough up the data, as well as an indicator if this is the last chunk so winrm_send knows to set the End signal
yield b64_data, (in_file.tell() == in_size)
if offset == 0: # empty file, return an empty buffer + eof to close it
yield "", True
def put_file(self, in_path, out_path):
super(Connection, self).put_file(in_path, out_path)
out_path = self._shell._unquote(out_path)
display.vvv('PUT "%s" TO "%s"' % (in_path, out_path), host=self._winrm_host)
if not os.path.exists(to_bytes(in_path, errors='surrogate_or_strict')):
raise AnsibleFileNotFound('file or module does not exist: "%s"' % to_native(in_path))
script_template = u'''
begin {{
$path = '{0}'
$DebugPreference = "Continue"
$ErrorActionPreference = "Stop"
Set-StrictMode -Version 2
$fd = [System.IO.File]::Create($path)
$sha1 = [System.Security.Cryptography.SHA1CryptoServiceProvider]::Create()
$bytes = @() #initialize for empty file case
}}
process {{
$bytes = [System.Convert]::FromBase64String($input)
$sha1.TransformBlock($bytes, 0, $bytes.Length, $bytes, 0) | Out-Null
$fd.Write($bytes, 0, $bytes.Length)
}}
end {{
$sha1.TransformFinalBlock($bytes, 0, 0) | Out-Null
$hash = [System.BitConverter]::ToString($sha1.Hash).Replace("-", "").ToLowerInvariant()
$fd.Close()
Write-Output "{{""sha1"":""$hash""}}"
}}
'''
script = script_template.format(self._shell._escape(out_path))
cmd_parts = self._shell._encode_script(script, as_list=True, strict_mode=False, preserve_rc=False)
result = self._winrm_exec(cmd_parts[0], cmd_parts[1:], stdin_iterator=self._put_file_stdin_iterator(in_path, out_path))
# TODO: improve error handling
if result.status_code != 0:
raise AnsibleError(to_native(result.std_err))
try:
put_output = json.loads(result.std_out)
except ValueError:
# stdout does not contain a valid response
stderr = to_bytes(result.std_err, encoding='utf-8')
if stderr.startswith(b"#< CLIXML"):
stderr = _parse_clixml(stderr)
raise AnsibleError('winrm put_file failed; \nstdout: %s\nstderr %s' % (to_native(result.std_out), to_native(stderr)))
remote_sha1 = put_output.get("sha1")
if not remote_sha1:
raise AnsibleError("Remote sha1 was not returned")
local_sha1 = secure_hash(in_path)
if not remote_sha1 == local_sha1:
raise AnsibleError("Remote sha1 hash {0} does not match local hash {1}".format(to_native(remote_sha1), to_native(local_sha1)))
def fetch_file(self, in_path, out_path):
super(Connection, self).fetch_file(in_path, out_path)
in_path = self._shell._unquote(in_path)
out_path = out_path.replace('\\', '/')
# consistent with other connection plugins, we assume the caller has created the target dir
display.vvv('FETCH "%s" TO "%s"' % (in_path, out_path), host=self._winrm_host)
buffer_size = 2**19 # 0.5MB chunks
out_file = None
try:
offset = 0
while True:
try:
script = '''
$path = '%(path)s'
If (Test-Path -Path $path -PathType Leaf)
{
$buffer_size = %(buffer_size)d
$offset = %(offset)d
$stream = New-Object -TypeName IO.FileStream($path, [IO.FileMode]::Open, [IO.FileAccess]::Read, [IO.FileShare]::ReadWrite)
$stream.Seek($offset, [System.IO.SeekOrigin]::Begin) > $null
$buffer = New-Object -TypeName byte[] $buffer_size
$bytes_read = $stream.Read($buffer, 0, $buffer_size)
if ($bytes_read -gt 0) {
$bytes = $buffer[0..($bytes_read - 1)]
[System.Convert]::ToBase64String($bytes)
}
$stream.Close() > $null
}
ElseIf (Test-Path -Path $path -PathType Container)
{
Write-Host "[DIR]";
}
Else
{
Write-Error "$path does not exist";
Exit 1;
}
''' % dict(buffer_size=buffer_size, path=self._shell._escape(in_path), offset=offset)
display.vvvvv('WINRM FETCH "%s" to "%s" (offset=%d)' % (in_path, out_path, offset), host=self._winrm_host)
cmd_parts = self._shell._encode_script(script, as_list=True, preserve_rc=False)
result = self._winrm_exec(cmd_parts[0], cmd_parts[1:])
if result.status_code != 0:
raise IOError(to_native(result.std_err))
if result.std_out.strip() == '[DIR]':
data = None
else:
data = base64.b64decode(result.std_out.strip())
if data is None:
break
else:
if not out_file:
# If out_path is a directory and we're expecting a file, bail out now.
if os.path.isdir(to_bytes(out_path, errors='surrogate_or_strict')):
break
out_file = open(to_bytes(out_path, errors='surrogate_or_strict'), 'wb')
out_file.write(data)
if len(data) < buffer_size:
break
offset += len(data)
except Exception:
traceback.print_exc()
raise AnsibleError('failed to transfer file to "%s"' % to_native(out_path))
finally:
if out_file:
out_file.close()
def close(self):
if self.protocol and self.shell_id:
display.vvvvv('WINRM CLOSE SHELL: %s' % self.shell_id, host=self._winrm_host)
self.protocol.close_shell(self.shell_id)
self.shell_id = None
self.protocol = None
self._connected = False
| gpl-3.0 |
shakalaca/ASUS_ZenFone_ZE550ML_ZE551ML | linux/kernel/tools/perf/scripts/python/sched-migration.py | 11215 | 11670 | #!/usr/bin/python
#
# Cpu task migration overview toy
#
# Copyright (C) 2010 Frederic Weisbecker <[email protected]>
#
# perf script event handlers have been generated by perf script -g python
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import os
import sys
from collections import defaultdict
from UserList import UserList
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
sys.path.append('scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from SchedGui import *
threads = { 0 : "idle"}
def thread_name(pid):
return "%s:%d" % (threads[pid], pid)
class RunqueueEventUnknown:
@staticmethod
def color():
return None
def __repr__(self):
return "unknown"
class RunqueueEventSleep:
@staticmethod
def color():
return (0, 0, 0xff)
def __init__(self, sleeper):
self.sleeper = sleeper
def __repr__(self):
return "%s gone to sleep" % thread_name(self.sleeper)
class RunqueueEventWakeup:
@staticmethod
def color():
return (0xff, 0xff, 0)
def __init__(self, wakee):
self.wakee = wakee
def __repr__(self):
return "%s woke up" % thread_name(self.wakee)
class RunqueueEventFork:
@staticmethod
def color():
return (0, 0xff, 0)
def __init__(self, child):
self.child = child
def __repr__(self):
return "new forked task %s" % thread_name(self.child)
class RunqueueMigrateIn:
@staticmethod
def color():
return (0, 0xf0, 0xff)
def __init__(self, new):
self.new = new
def __repr__(self):
return "task migrated in %s" % thread_name(self.new)
class RunqueueMigrateOut:
@staticmethod
def color():
return (0xff, 0, 0xff)
def __init__(self, old):
self.old = old
def __repr__(self):
return "task migrated out %s" % thread_name(self.old)
class RunqueueSnapshot:
def __init__(self, tasks = [0], event = RunqueueEventUnknown()):
self.tasks = tuple(tasks)
self.event = event
def sched_switch(self, prev, prev_state, next):
event = RunqueueEventUnknown()
if taskState(prev_state) == "R" and next in self.tasks \
and prev in self.tasks:
return self
if taskState(prev_state) != "R":
event = RunqueueEventSleep(prev)
next_tasks = list(self.tasks[:])
if prev in self.tasks:
if taskState(prev_state) != "R":
next_tasks.remove(prev)
elif taskState(prev_state) == "R":
next_tasks.append(prev)
if next not in next_tasks:
next_tasks.append(next)
return RunqueueSnapshot(next_tasks, event)
def migrate_out(self, old):
if old not in self.tasks:
return self
next_tasks = [task for task in self.tasks if task != old]
return RunqueueSnapshot(next_tasks, RunqueueMigrateOut(old))
def __migrate_in(self, new, event):
if new in self.tasks:
self.event = event
return self
next_tasks = self.tasks[:] + tuple([new])
return RunqueueSnapshot(next_tasks, event)
def migrate_in(self, new):
return self.__migrate_in(new, RunqueueMigrateIn(new))
def wake_up(self, new):
return self.__migrate_in(new, RunqueueEventWakeup(new))
def wake_up_new(self, new):
return self.__migrate_in(new, RunqueueEventFork(new))
def load(self):
""" Provide the number of tasks on the runqueue.
Don't count idle"""
return len(self.tasks) - 1
def __repr__(self):
ret = self.tasks.__repr__()
ret += self.origin_tostring()
return ret
class TimeSlice:
def __init__(self, start, prev):
self.start = start
self.prev = prev
self.end = start
# cpus that triggered the event
self.event_cpus = []
if prev is not None:
self.total_load = prev.total_load
self.rqs = prev.rqs.copy()
else:
self.rqs = defaultdict(RunqueueSnapshot)
self.total_load = 0
def __update_total_load(self, old_rq, new_rq):
diff = new_rq.load() - old_rq.load()
self.total_load += diff
def sched_switch(self, ts_list, prev, prev_state, next, cpu):
old_rq = self.prev.rqs[cpu]
new_rq = old_rq.sched_switch(prev, prev_state, next)
if old_rq is new_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def migrate(self, ts_list, new, old_cpu, new_cpu):
if old_cpu == new_cpu:
return
old_rq = self.prev.rqs[old_cpu]
out_rq = old_rq.migrate_out(new)
self.rqs[old_cpu] = out_rq
self.__update_total_load(old_rq, out_rq)
new_rq = self.prev.rqs[new_cpu]
in_rq = new_rq.migrate_in(new)
self.rqs[new_cpu] = in_rq
self.__update_total_load(new_rq, in_rq)
ts_list.append(self)
if old_rq is not out_rq:
self.event_cpus.append(old_cpu)
self.event_cpus.append(new_cpu)
def wake_up(self, ts_list, pid, cpu, fork):
old_rq = self.prev.rqs[cpu]
if fork:
new_rq = old_rq.wake_up_new(pid)
else:
new_rq = old_rq.wake_up(pid)
if new_rq is old_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def next(self, t):
self.end = t
return TimeSlice(t, self)
class TimeSliceList(UserList):
def __init__(self, arg = []):
self.data = arg
def get_time_slice(self, ts):
if len(self.data) == 0:
slice = TimeSlice(ts, TimeSlice(-1, None))
else:
slice = self.data[-1].next(ts)
return slice
def find_time_slice(self, ts):
start = 0
end = len(self.data)
found = -1
searching = True
while searching:
if start == end or start == end - 1:
searching = False
i = (end + start) / 2
if self.data[i].start <= ts and self.data[i].end >= ts:
found = i
end = i
continue
if self.data[i].end < ts:
start = i
elif self.data[i].start > ts:
end = i
return found
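    # find_time_slice above is a plain binary search over the time-ordered
    # slices: it returns the index of the slice containing ts, or -1 when ts
    # falls outside every recorded slice.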
def set_root_win(self, win):
self.root_win = win
def mouse_down(self, cpu, t):
idx = self.find_time_slice(t)
if idx == -1:
return
ts = self[idx]
rq = ts.rqs[cpu]
raw = "CPU: %d\n" % cpu
raw += "Last event : %s\n" % rq.event.__repr__()
raw += "Timestamp : %d.%06d\n" % (ts.start / (10 ** 9), (ts.start % (10 ** 9)) / 1000)
raw += "Duration : %6d us\n" % ((ts.end - ts.start) / (10 ** 6))
raw += "Load = %d\n" % rq.load()
for t in rq.tasks:
raw += "%s \n" % thread_name(t)
self.root_win.update_summary(raw)
def update_rectangle_cpu(self, slice, cpu):
rq = slice.rqs[cpu]
if slice.total_load != 0:
load_rate = rq.load() / float(slice.total_load)
else:
load_rate = 0
red_power = int(0xff - (0xff * load_rate))
color = (0xff, red_power, red_power)
top_color = None
if cpu in slice.event_cpus:
top_color = rq.event.color()
self.root_win.paint_rectangle_zone(cpu, color, top_color, slice.start, slice.end)
def fill_zone(self, start, end):
i = self.find_time_slice(start)
if i == -1:
return
for i in xrange(i, len(self.data)):
timeslice = self.data[i]
if timeslice.start > end:
return
for cpu in timeslice.rqs:
self.update_rectangle_cpu(timeslice, cpu)
def interval(self):
if len(self.data) == 0:
return (0, 0)
return (self.data[0].start, self.data[-1].end)
def nr_rectangles(self):
last_ts = self.data[-1]
max_cpu = 0
for cpu in last_ts.rqs:
if cpu > max_cpu:
max_cpu = cpu
return max_cpu
class SchedEventProxy:
def __init__(self):
self.current_tsk = defaultdict(lambda : -1)
self.timeslices = TimeSliceList()
def sched_switch(self, headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
""" Ensure the task we sched out this cpu is really the one
we logged. Otherwise we may have missed traces """
on_cpu_task = self.current_tsk[headers.cpu]
if on_cpu_task != -1 and on_cpu_task != prev_pid:
print "Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \
(headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid)
threads[prev_pid] = prev_comm
threads[next_pid] = next_comm
self.current_tsk[headers.cpu] = next_pid
ts = self.timeslices.get_time_slice(headers.ts())
ts.sched_switch(self.timeslices, prev_pid, prev_state, next_pid, headers.cpu)
def migrate(self, headers, pid, prio, orig_cpu, dest_cpu):
ts = self.timeslices.get_time_slice(headers.ts())
ts.migrate(self.timeslices, pid, orig_cpu, dest_cpu)
def wake_up(self, headers, comm, pid, success, target_cpu, fork):
if success == 0:
return
ts = self.timeslices.get_time_slice(headers.ts())
ts.wake_up(self.timeslices, pid, target_cpu, fork)
def trace_begin():
global parser
parser = SchedEventProxy()
def trace_end():
app = wx.App(False)
timeslices = parser.timeslices
frame = RootFrame(timeslices, "Migration")
app.MainLoop()
def sched__sched_stat_runtime(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, runtime, vruntime):
pass
def sched__sched_stat_iowait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_sleep(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_process_fork(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
parent_comm, parent_pid, child_comm, child_pid):
pass
def sched__sched_process_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_free(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_migrate_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, orig_cpu,
dest_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.migrate(headers, pid, prio, orig_cpu, dest_cpu)
def sched__sched_switch(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.sched_switch(headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio)
def sched__sched_wakeup_new(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.wake_up(headers, comm, pid, success, target_cpu, 1)
def sched__sched_wakeup(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.wake_up(headers, comm, pid, success, target_cpu, 0)
def sched__sched_wait_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_kthread_stop_ret(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
ret):
pass
def sched__sched_kthread_stop(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid):
pass
def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs,
common_pid, common_comm):
pass
| gpl-2.0 |
Magicked/crits | crits/core/form_consts.py | 6 | 7769 | class Action():
ACTION_TYPE = "Action type"
BEGIN_DATE = "Begin date"
ANALYST = "Analyst"
END_DATE = "End date"
PERFORMED_DATE = "Performed date"
ACTIVE = "Active"
REASON = "Reason"
DATE = "Date"
OBJECT_TYPES = "TLOs"
PREFERRED = "Preferred TLOs"
class Common():
ADD_INDICATOR = "Add Indicator?"
BUCKET_LIST = "Bucket List"
CAMPAIGN = "Campaign"
CAMPAIGN_CONFIDENCE = "Campaign Confidence"
OBJECTS_DATA = "Objects Data"
SOURCE = "Source"
SOURCE_REFERENCE = "Source Reference"
SOURCE_METHOD = "Source Method"
SOURCE_TLP = "Source TLP"
TICKET = "Ticket"
TLP = "Sharing TLP"
CLASS_ATTRIBUTE = "class"
BULK_SKIP = "bulkskip"
BULK_REQUIRED = "bulkrequired"
# class names
Actor = "Actor"
Backdoor = "Backdoor"
Campaign = "Campaign"
Certificate = "Certificate"
Domain = "Domain"
Email = "Email"
Event = "Event"
Exploit = "Exploit"
Indicator = "Indicator"
IP = "IP"
Object = "Object"
PCAP = "PCAP"
RawData = "RawData"
Sample = "Sample"
Signature = "Signature"
Target = "Target"
RELATED_ID = "Related ID"
RELATED_TYPE = "Related Type"
RELATIONSHIP_TYPE = "Relationship Type"
BUCKET_LIST_VARIABLE_NAME = "bucket_list"
TICKET_VARIABLE_NAME = "ticket"
class Status():
"""
Status fields/enumerations used in bulk upload.
"""
STATUS_FIELD = "status";
FAILURE = 0;
SUCCESS = 1;
DUPLICATE = 2;
class Actor():
"""
Constants for Actors.
"""
NAME = "Name"
ALIASES = "Aliases"
DESCRIPTION = "Description"
CAMPAIGN = Common.CAMPAIGN
CAMPAIGN_CONFIDENCE = Common.CAMPAIGN_CONFIDENCE
SOURCE = Common.SOURCE
SOURCE_METHOD = "Source Method"
SOURCE_REFERENCE = Common.SOURCE_REFERENCE
class Backdoor():
"""
Constants for Backdoors.
"""
NAME = "Backdoor name"
ALIASES = "Aliases"
DESCRIPTION = "Description"
CAMPAIGN = Common.CAMPAIGN
CAMPAIGN_CONFIDENCE = Common.CAMPAIGN_CONFIDENCE
VERSION = "Version"
SOURCE = Common.SOURCE
SOURCE_METHOD = "Source Method"
SOURCE_REFERENCE = Common.SOURCE_REFERENCE
class Exploit():
"""
Constants for Exploits.
"""
NAME = "Name"
DESCRIPTION = "Description"
CVE = "CVE"
CAMPAIGN = Common.CAMPAIGN
CAMPAIGN_CONFIDENCE = Common.CAMPAIGN_CONFIDENCE
VERSION = "Version"
SOURCE = Common.SOURCE
SOURCE_METHOD = "Source Method"
SOURCE_REFERENCE = Common.SOURCE_REFERENCE
class Campaign():
"""
Constants for Campaigns.
"""
NAME = "Name"
class Certificate():
"""
Constants for Certificates.
"""
SOURCE = Common.SOURCE
SOURCE_METHOD = Common.SOURCE_METHOD
SOURCE_REFERENCE = Common.SOURCE_REFERENCE
class IP():
"""
Constants for IPs.
"""
IP_ADDRESS = "IP Address"
IP_TYPE = "IP Type"
ANALYST = "Analyst"
CAMPAIGN = Common.CAMPAIGN
CAMPAIGN_CONFIDENCE = Common.CAMPAIGN_CONFIDENCE
SOURCE = Common.SOURCE
SOURCE_METHOD = Common.SOURCE_METHOD
SOURCE_REFERENCE = Common.SOURCE_REFERENCE
SOURCE_TLP = Common.SOURCE_TLP
ADD_INDICATOR = Common.ADD_INDICATOR
INDICATOR_REFERENCE = "Indicator Reference"
IP_DATE = "IP Date"
IP_SOURCE = "IP Source"
IP_METHOD = "IP Source Method"
IP_REFERENCE = "IP Source Reference"
IP_TLP = "IP Source TLP"
CACHED_RESULTS = "ip_cached_results"
class Domain():
"""
Constants for Domains.
"""
DOMAIN_NAME = "Domain Name"
CAMPAIGN = Common.CAMPAIGN
CAMPAIGN_CONFIDENCE = Common.CAMPAIGN_CONFIDENCE
DOMAIN_SOURCE = Common.SOURCE
DOMAIN_METHOD = Common.SOURCE_METHOD
DOMAIN_REFERENCE = Common.SOURCE_REFERENCE
ADD_IP_ADDRESS = "Add IP Address?"
IP_ADDRESS = IP.IP_ADDRESS
IP_TYPE = IP.IP_TYPE
IP_DATE = IP.IP_DATE
SAME_SOURCE = "Use Domain Source"
IP_SOURCE = IP.IP_SOURCE
IP_METHOD = IP.IP_METHOD
IP_REFERENCE = IP.IP_REFERENCE
IP_TLP = IP.IP_TLP
ADD_INDICATORS = "Add Indicator(s)?"
CACHED_RESULTS = "domain_cached_results"
class Email():
"""
Constants for Emails.
"""
SOURCE = Common.SOURCE
SOURCE_METHOD = Common.SOURCE_METHOD
SOURCE_REFERENCE = Common.SOURCE_REFERENCE
class Event():
"""
Constants for Events.
"""
TITLE = "Title"
SOURCE = Common.SOURCE
SOURCE_METHOD = Common.SOURCE_METHOD
SOURCE_REFERENCE = Common.SOURCE_REFERENCE
class Indicator():
"""
Constants for Indicators.
"""
SOURCE = Common.SOURCE
SOURCE_METHOD = Common.SOURCE_METHOD
SOURCE_REFERENCE = Common.SOURCE_REFERENCE
class NotificationType():
ALERT = 'alert'
ERROR = 'error'
INFORMATION = 'information'
NOTIFICATION = 'notification'
SUCCESS = 'success'
WARNING = 'warning'
ALL = [ALERT, ERROR, INFORMATION, NOTIFICATION, SUCCESS, WARNING]
class Object():
"""
Constants for Objects.
"""
OBJECT_TYPE_INDEX = 0
VALUE_INDEX = 1
SOURCE_INDEX = 2
METHOD_INDEX = 3
REFERENCE_INDEX = 4
ADD_INDICATOR_INDEX = 5
OBJECT_TYPE = "Object Type"
VALUE = "Value"
SOURCE = Common.SOURCE
METHOD = "Method"
REFERENCE = "Reference"
PARENT_OBJECT_TYPE = "Otype"
PARENT_OBJECT_ID = "Oid"
ADD_INDICATOR = Common.ADD_INDICATOR
class PCAP():
"""
Constants for PCAPs.
"""
SOURCE = Common.SOURCE
SOURCE_METHOD = Common.SOURCE_METHOD
SOURCE_REFERENCE = Common.SOURCE_REFERENCE
class RawData():
"""
Constants for RawData.
"""
SOURCE = Common.SOURCE
SOURCE_METHOD = Common.SOURCE_METHOD
SOURCE_REFERENCE = Common.SOURCE_REFERENCE
class Sample():
"""
Constants for Samples.
"""
BUCKET_LIST = Common.BUCKET_LIST
CAMPAIGN = Common.CAMPAIGN
CAMPAIGN_CONFIDENCE = Common.CAMPAIGN_CONFIDENCE
EMAIL_RESULTS = "Email Me Results"
FILE_DATA = "File Data"
FILE_FORMAT = "File Format"
FILE_NAME = "File Name"
INHERIT_CAMPAIGNS = "Inherit Campaigns?"
INHERIT_SOURCES = "Inherit Sources?"
MD5 = "MD5"
MIMETYPE = "Mimetype"
RELATED_MD5 = "Related MD5"
PASSWORD = "Password"
SHA1 = "SHA1"
SHA256 = "SHA256"
SIZE = "SIZE"
SOURCE = Common.SOURCE
SOURCE_METHOD = Common.SOURCE_METHOD
SOURCE_REFERENCE = Common.SOURCE_REFERENCE
SOURCE_TLP = Common.SOURCE_TLP
UPLOAD_TYPE = "Upload Type"
DESCRIPTION = "Description"
CACHED_RESULTS = "sample_cached_results"
class UploadType():
FILE_UPLOAD = "File Upload"
METADATA_UPLOAD = "Metadata Upload"
class Signature():
"""
Constants for Signatures. Signature dependencies are stored as a list,
much like the bucket list, but have no counterpart in the other classes.
"""
SOURCE = Common.SOURCE
SOURCE_METHOD = Common.SOURCE_METHOD
SOURCE_REFERENCE = Common.SOURCE_REFERENCE
class Target():
"""
Constants for Targets.
"""
TITLE = "Title"
CAMPAIGN = Common.CAMPAIGN
CAMPAIGN_CONFIDENCE = Common.CAMPAIGN_CONFIDENCE
def get_source_field_for_class(otype):
"""
Based on the CRITs type, get the source field constant.
:param otype: The CRITs type.
:type otype: str.
:returns: str
"""
class_to_source_field_map = {
Common.Certificate: Certificate.SOURCE,
Common.Domain: Domain.DOMAIN_SOURCE,
Common.Email: Email.SOURCE,
Common.Event: Event.SOURCE,
Common.Indicator: Indicator.SOURCE,
Common.IP: IP.SOURCE,
Common.Object: Object.SOURCE,
Common.PCAP: PCAP.SOURCE,
Common.RawData: RawData.SOURCE,
Common.Sample: Sample.SOURCE,
Common.Signature: Signature.SOURCE,
}
return class_to_source_field_map.get(otype)
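# Illustrative example: get_source_field_for_class(Common.Domain) returns
# Domain.DOMAIN_SOURCE (the "Source" label), while unmapped types such as
# Common.Actor fall through to None via dict.get().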
| mit |
hlmnrmr/liveblog | server/liveblog/syndication/consumer.py | 1 | 5521 | import logging
from bson import ObjectId
from superdesk.resource import Resource
from superdesk.services import BaseService
from superdesk.errors import SuperdeskApiError
from superdesk import get_resource_service
from flask import current_app as app
from flask import Blueprint
from flask_cors import CORS
from liveblog.utils.api import api_response
from .exceptions import APIConnectionError, ConsumerAPIError
from .syndication import WEBHOOK_METHODS
from .utils import generate_api_key, send_api_request, trailing_slash, blueprint_superdesk_token_auth
from .tasks import check_webhook_status
logger = logging.getLogger('superdesk')
consumers_blueprint = Blueprint('consumers', __name__)
CORS(consumers_blueprint)
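# Cerberus-style schema consumed by the Superdesk/Eve resource layer; the
# custom rules 'uniqueurl' and 'httpsurl' are assumed to be registered by this
# package's validators rather than being built-in Cerberus rules.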
consumers_schema = {
'name': {
'type': 'string',
'unique': True
},
'contacts': {
'type': 'list',
'schema': {
'type': 'dict',
'schema': {
'first_name': {
'type': 'string',
},
'last_name': {
'type': 'string',
},
'email': {
'type': 'email',
'required': True
},
'phone': {
'type': 'string',
'nullable': True
}
}
}
},
'api_key': {
'type': 'string',
'unique': True
},
'webhook_url': {
'type': 'string',
'required': True,
'uniqueurl': True,
'httpsurl': {
'key_field': None,
'check_auth': False,
'webhook': True
}
},
'webhook_enabled': {
'type': 'boolean',
'default': False,
'required': False
},
'picture_url': {
'type': 'string',
'nullable': True
},
'avatar': Resource.rel('upload', embeddable=True, nullable=True),
'avatar_renditions': {'type': 'dict'}
}
class ConsumerService(BaseService):
notification_key = 'consumers'
def _cursor(self, resource=None):
resource = resource or self.datasource
return app.data.mongo.pymongo(resource=resource).db[resource]
def _get_consumer(self, consumer):
if isinstance(consumer, (str, ObjectId)):
consumer = self.find_one(_id=consumer, req=None)
return consumer
def _send_webhook_request(self, consumer_id, consumer_blog_token=None, method='GET', data=None, json_loads=True,
timeout=5):
consumer = self._get_consumer(consumer_id)
if not consumer:
raise ConsumerAPIError('Unable to get consumer "{}".'.format(consumer_id))
api_url = trailing_slash(consumer['webhook_url'])
try:
response = send_api_request(api_url, consumer_blog_token, method=method, data=data, json_loads=json_loads,
timeout=timeout)
except APIConnectionError as e:
raise ConsumerAPIError(str(e))
else:
return response
def send_post(self, syndication_out, new_post, action='created'):
blog_token = syndication_out['token']
consumer_id = syndication_out['consumer_id']
if action not in WEBHOOK_METHODS:
raise NotImplementedError('send_syndication_post "{}" not implemented yet.'.format(action))
else:
return self._send_webhook_request(consumer_id, blog_token, method=WEBHOOK_METHODS[action], data=new_post)
def on_create(self, docs):
for doc in docs:
if 'webhook_url' in doc:
doc['webhook_url'] = trailing_slash(doc['webhook_url'])
if not doc.get('api_key'):
doc['api_key'] = generate_api_key()
super().on_create(docs)
def on_created(self, docs):
super().on_created(docs)
for doc in docs:
check_webhook_status.delay(doc['_id'])
def on_update(self, updates, original):
if 'webhook_url' in updates:
updates['webhook_url'] = trailing_slash(updates['webhook_url'])
if 'api_key' in updates and updates['api_key'] != original['api_key']:
updates['api_key'] = generate_api_key()
super().on_update(updates, original)
check_webhook_status.delay(original['_id'])
def on_delete(self, doc):
out_service = get_resource_service('syndication_out')
if out_service.consumer_is_syndicating(doc['_id']):
raise SuperdeskApiError.forbiddenError(
message='Not allowed to delete a consumer who is currently syndicating a blog'
)
super().on_delete(doc)
class ConsumerResource(Resource):
datasource = {
'source': 'consumers',
'search_backend': None,
'default_sort': [('_updated', -1)],
}
item_methods = ['GET', 'PATCH', 'PUT', 'DELETE']
privileges = {'POST': 'consumers', 'PATCH': 'consumers', 'PUT': 'consumers', 'DELETE': 'consumers'}
schema = consumers_schema
@consumers_blueprint.route('/api/consumers/<consumer_id>/check_connection', methods=['GET'])
def consumer_check_connection(consumer_id):
consumers = get_resource_service('consumers')
consumer = consumers.find_one(_id=consumer_id, req=None)
if not consumer:
return api_response('invalid consumer id', 404)
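# Unlike the resource hooks above, which schedule the check asynchronously
# via .delay(), the check runs synchronously here so the response reflects
# the consumer's refreshed webhook status.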
check_webhook_status(consumer_id)
return api_response('OK', 200)
consumers_blueprint.before_request(blueprint_superdesk_token_auth)
| agpl-3.0 |
aguirrea/lucy | tests/lfootGraph.py | 1 | 6007 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Andrés Aguirre Dorelo
#
# MINA/INCO/UDELAR
#
# module for finding the steps in the tutors
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import os
import glob
import ntpath
from parser.BvhImport import BvhImport
import matplotlib.pyplot as plt
from configuration.LoadSystemConfiguration import LoadSystemConfiguration
import numpy as np
from scipy.signal import argrelextrema
from collections import Counter
sysConf = LoadSystemConfiguration()
BVHDir = os.getcwd() + sysConf.getDirectory("CMU mocap Files")
Y_THREADHOLD = 11 #TODO calculate this as the average of the steps_highs
X_THREADHOLD = 36
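# Y_THREADHOLD: minimum height gap between the two feet for one of them to
# count as lifted off the floor; X_THREADHOLD: minimum frame distance between
# two detected steps of the same foot.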
def firstMax(values1, values2):
res=0
for i in range(len(values1)-2):
if values1[i] < values1[i+1] and values1[i+1] > values1[i+2]: #i+1 is a local maximum
if (values1[i] - values2[i]) > Y_THREADHOLD:
res=i+1
elif values1[i] < values1[i+1] < values1[i+2]: #values still rising at i
if (values1[i] - values2[i]) > Y_THREADHOLD:
res=i
return res
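# (firstMax is defined above but never called in the step detection below.)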
def find_nearest(a, a0):
"Element in nd array `a` closest to the scalar value `a0`"
idx = np.abs(a - a0).argmin()
return a.flat[idx]
for filename in glob.glob(os.path.join(BVHDir, '*.bvh')):
print "transforming: " + filename + " ..."
parser = BvhImport(filename)
x_,y_,z_ = parser.getNodePositionsFromName("lFoot")
y1 = []
y2 = []
x1 = []
x2 = []
for key, value in y_.iteritems():
y1.append(value)
x1.append(key)
x_,y_,z_ = parser.getNodePositionsFromName("rFoot")
for key, value in y_.iteritems():
y2.append(value)
x2.append(key)
maxLfootIndexes = [x for x in argrelextrema(np.array(y1), np.greater)[0]]
maxRfootIndexes = [x for x in argrelextrema(np.array(y2), np.greater)[0]]
stepsLfootIndexes = []
for i in range(len(maxLfootIndexes)):
index = maxLfootIndexes[i]
if y1[index] - y2[index] > Y_THREADHOLD: #one foot is up and the other is on the floor
if len(stepsLfootIndexes)>0:
if abs(index - find_nearest(np.array(stepsLfootIndexes), index)) > X_THREADHOLD: #avoid a max near an existing point
stepsLfootIndexes.append(index)
print "append L"
else:
if y1[find_nearest(np.array(stepsLfootIndexes), index)] < y1[index]: #check if the existing near max is lower than the new one
print "remove L", find_nearest(np.array(stepsLfootIndexes), index), "from: ", stepsLfootIndexes
stepsLfootIndexes.remove(find_nearest(np.array(stepsLfootIndexes), index))
print "remove L"
stepsLfootIndexes.append(index)
print "append L"
else:
stepsLfootIndexes.append(index)
print "append L"
stepsRfootIndexes = []
for i in range(len(maxRfootIndexes)):
index = maxRfootIndexes[i]
if y2[index] - y1[index] > Y_THREADHOLD: #one foot is up and the other is on the floor
if len(stepsRfootIndexes)>0:
if abs(index - find_nearest(np.array(stepsRfootIndexes), index)) > X_THREADHOLD: #avoid a max near an existing point
stepsRfootIndexes.append(index)
print "append R"
else:
if y2[find_nearest(np.array(stepsRfootIndexes), index)] < y2[index]: #check if the existing near max is lower than the new one
print "remove R", find_nearest(np.array(stepsRfootIndexes), index), "from: ", stepsRfootIndexes, "index: ", index
stepsRfootIndexes.remove(find_nearest(np.array(stepsRfootIndexes), index))
print "remove R"
stepsRfootIndexes.append(index)
print "append R"
else:
stepsRfootIndexes.append(index)
print "append R"
if stepsLfootIndexes[0] < stepsRfootIndexes[0]:
if len(stepsLfootIndexes) > 2:
testPoint = stepsLfootIndexes[1]
while(y1[testPoint]>y2[testPoint]):
testPoint = testPoint + 1
end = testPoint + 5
print "red over green| ", "red: ", stepsLfootIndexes[0], "green: ", stepsRfootIndexes[0], "second red: ", stepsLfootIndexes[1], "end: ", end
else:
end = len(y1)
print "red over green| ", "red: ", stepsLfootIndexes[0], "green: ", stepsRfootIndexes[0], "second red: -----", "end: ", end
else:
if len(stepsRfootIndexes) > 2:
testPoint = stepsRfootIndexes[1]
while(y2[testPoint]>y1[testPoint]):
testPoint = testPoint + 1
end = testPoint + 5
print "green over red| ", "green: ", stepsRfootIndexes[0], "red: ", stepsLfootIndexes[0], "second green: ", stepsRfootIndexes[1], "end: ", end
else:
end = len(y2)
print "green over red| ", "green: ", stepsRfootIndexes[0], "red: ", stepsLfootIndexes[0], "second green: -----", "end: ", end
plt.plot(x1, y1,'ro')
plt.plot(x1, y2,'g')
plt.show()
| gpl-3.0 |
otmaneJai/Zipline | zipline/utils/tradingcalendar_bmf.py | 17 | 7576 | #
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
import pytz
from datetime import datetime
from dateutil import rrule
from zipline.utils.tradingcalendar import end, canonicalize_datetime, \
get_open_and_closes
start = pd.Timestamp('1994-01-01', tz='UTC')
def get_non_trading_days(start, end):
non_trading_rules = []
start = canonicalize_datetime(start)
end = canonicalize_datetime(end)
weekends = rrule.rrule(
rrule.YEARLY,
byweekday=(rrule.SA, rrule.SU),
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(weekends)
# Universal Confraternization (Confraternização Universal, New Year's Day)
conf_universal = rrule.rrule(
rrule.MONTHLY,
byyearday=1,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(conf_universal)
# Sao Paulo city birthday
aniversario_sao_paulo = rrule.rrule(
rrule.MONTHLY,
bymonth=1,
bymonthday=25,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(aniversario_sao_paulo)
# Carnival Monday
carnaval_segunda = rrule.rrule(
rrule.MONTHLY,
byeaster=-48,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(carnaval_segunda)
# Carnival Tuesday
carnaval_terca = rrule.rrule(
rrule.MONTHLY,
byeaster=-47,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(carnaval_terca)
# Good Friday (Paixão de Cristo)
sexta_paixao = rrule.rrule(
rrule.MONTHLY,
byeaster=-2,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(sexta_paixao)
# Corpus Christi
corpus_christi = rrule.rrule(
rrule.MONTHLY,
byeaster=60,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(corpus_christi)
tiradentes = rrule.rrule(
rrule.MONTHLY,
bymonth=4,
bymonthday=21,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(tiradentes)
# Labor day
dia_trabalho = rrule.rrule(
rrule.MONTHLY,
bymonth=5,
bymonthday=1,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(dia_trabalho)
# Constitutionalist Revolution
constitucionalista = rrule.rrule(
rrule.MONTHLY,
bymonth=7,
bymonthday=9,
cache=True,
dtstart=datetime(1997, 1, 1, tzinfo=pytz.utc),
until=end
)
non_trading_rules.append(constitucionalista)
# Independence Day
independencia = rrule.rrule(
rrule.MONTHLY,
bymonth=9,
bymonthday=7,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(independencia)
# Our Lady of Aparecida
aparecida = rrule.rrule(
rrule.MONTHLY,
bymonth=10,
bymonthday=12,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(aparecida)
# All Souls' day
finados = rrule.rrule(
rrule.MONTHLY,
bymonth=11,
bymonthday=2,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(finados)
# Proclamation of the Republic
proclamacao_republica = rrule.rrule(
rrule.MONTHLY,
bymonth=11,
bymonthday=15,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(proclamacao_republica)
# Day of Black Awareness
consciencia_negra = rrule.rrule(
rrule.MONTHLY,
bymonth=11,
bymonthday=20,
cache=True,
dtstart=datetime(2004, 1, 1, tzinfo=pytz.utc),
until=end
)
non_trading_rules.append(consciencia_negra)
# Christmas Eve
vespera_natal = rrule.rrule(
rrule.MONTHLY,
bymonth=12,
bymonthday=24,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(vespera_natal)
# Christmas
natal = rrule.rrule(
rrule.MONTHLY,
bymonth=12,
bymonthday=25,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(natal)
# New Year's Eve
ano_novo = rrule.rrule(
rrule.MONTHLY,
bymonth=12,
bymonthday=31,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(ano_novo)
# December 30th when it falls on a Friday (New Year's Eve is then a Saturday)
ano_novo_sab = rrule.rrule(
rrule.MONTHLY,
bymonth=12,
bymonthday=30,
byweekday=rrule.FR,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(ano_novo_sab)
non_trading_ruleset = rrule.rruleset()
for rule in non_trading_rules:
non_trading_ruleset.rrule(rule)
non_trading_days = non_trading_ruleset.between(start, end, inc=True)
# World Cup 2014 Opening
non_trading_days.append(datetime(2014, 6, 12, tzinfo=pytz.utc))
non_trading_days.sort()
return pd.DatetimeIndex(non_trading_days)
non_trading_days = get_non_trading_days(start, end)
trading_day = pd.tseries.offsets.CDay(holidays=non_trading_days)
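# CDay is pandas' CustomBusinessDay offset: advancing by trading_day skips
# weekends and every date in non_trading_days, so the date_range() below
# yields only Bovespa trading sessions.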
def get_trading_days(start, end, trading_day=trading_day):
return pd.date_range(start=start.date(),
end=end.date(),
freq=trading_day).tz_localize('UTC')
trading_days = get_trading_days(start, end)
# Ash Wednesday
quarta_cinzas = rrule.rrule(
rrule.MONTHLY,
byeaster=-46,
cache=True,
dtstart=start,
until=end
)
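# byeaster=-46 resolves to Ash Wednesday, 46 days before Easter Sunday.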
def get_early_closes(start, end):
# Bovespa's only "early close" is the late open on Ash Wednesday
# (see get_open_and_close below).
start = canonicalize_datetime(start)
end = canonicalize_datetime(end)
early_close_rules = []
early_close_rules.append(quarta_cinzas)
early_close_ruleset = rrule.rruleset()
for rule in early_close_rules:
early_close_ruleset.rrule(rule)
early_closes = early_close_ruleset.between(start, end, inc=True)
early_closes.sort()
return pd.DatetimeIndex(early_closes)
early_closes = get_early_closes(start, end)
def get_open_and_close(day, early_closes):
# only "early close" event in Bovespa actually is a late start
# as the market only opens at 1pm
open_hour = 13 if day in quarta_cinzas else 10
market_open = pd.Timestamp(
datetime(
year=day.year,
month=day.month,
day=day.day,
hour=open_hour,
minute=00),
tz='America/Sao_Paulo').tz_convert('UTC')
market_close = pd.Timestamp(
datetime(
year=day.year,
month=day.month,
day=day.day,
hour=16),
tz='America/Sao_Paulo').tz_convert('UTC')
return market_open, market_close
open_and_closes = get_open_and_closes(trading_days, early_closes,
get_open_and_close)
| apache-2.0 |
mcgachey/edx-platform | lms/djangoapps/certificates/urls.py | 56 | 1067 | """
URLs for the certificates app.
"""
from django.conf.urls import patterns, url
from django.conf import settings
from certificates import views
urlpatterns = patterns(
'',
# Certificates HTML view
url(
r'^user/(?P<user_id>[^/]*)/course/{course_id}'.format(course_id=settings.COURSE_ID_PATTERN),
views.render_html_view,
name='html_view'
),
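# e.g., assuming this urlconf is mounted under /certificates/:
# /certificates/user/42/course/course-v1:edX+DemoX+Demo_2014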
# End-points used by student support
# The views in the lms/djangoapps/support use these end-points
# to retrieve certificate information and regenerate certificates.
url(r'search', views.search_by_user, name="search"),
url(r'regenerate', views.regenerate_certificate_for_user, name="regenerate_certificate_for_user"),
)
if settings.FEATURES.get("ENABLE_OPENBADGES", False):
urlpatterns += (
url(
r'^badge_share_tracker/{}/(?P<network>[^/]+)/(?P<student_username>[^/]+)/$'.format(
settings.COURSE_ID_PATTERN
),
views.track_share_redirect,
name='badge_share_tracker'
),
)
| agpl-3.0 |
mtanida/image_collection_browser | scripts/generate_coordinates.py | 1 | 3397 | # -*- coding: utf-8 -*-
# Description: generates a json file that contains the item ids in order they appear in the UI
# Example usage:
# python generate_coordinates.py ../data/ ../js/coords.json 100 10 10 50 20 3
from PIL import Image
import json
import math
import os
import sys
# input
if len(sys.argv) < 9:
print "Usage: %s <inputdir of data> <outputfile json> <images per row> <image cell width> <image cell height> <group item threshold> <group threshold> <min group rows>" % sys.argv[0]
sys.exit(1)
INPUT_DATA_DIR = sys.argv[1]
OUTPUT_FILE = sys.argv[2]
ITEMS_PER_ROW = int(sys.argv[3])
ITEM_W = int(sys.argv[4])
ITEM_H = int(sys.argv[5])
GROUP_ITEM_THRESHOLD = int(sys.argv[6])
GROUP_THRESHOLD = int(sys.argv[7])
MIN_GROUP_ROWS = int(sys.argv[8])
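# Layout model: each grouping's coords form a flat list read ITEMS_PER_ROW per
# row; groups smaller than GROUP_ITEM_THRESHOLD are folded into an "other"
# group (only when more than GROUP_THRESHOLD groups exist), and every group is
# padded with -1 placeholders up to at least MIN_GROUP_ROWS rows.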
# init
coords = {
'centuries': [],
'genres': [],
'collections': [],
'colors': []
}
def getItemsIds(the_group, the_items):
ids = []
if isinstance(the_items[0], list):
items = [{'id': item_i, 'score': group_value[1]} for item_i, group_value in enumerate(the_items) if group_value[0] == the_group['index']]
items = sorted(items, key=lambda k: k['score'], reverse=True)
ids = [i['id'] for i in items]
else:
ids = [item_i for item_i, group_i in enumerate(the_items) if group_i == the_group['index']]
return ids
def getGroups(groupName):
global INPUT_DATA_DIR
global GROUP_ITEM_THRESHOLD
global GROUP_THRESHOLD
item_groups = []
groups = []
_groups = []
with open(INPUT_DATA_DIR + groupName + '.json') as data_file:
_groups = json.load(data_file)
with open(INPUT_DATA_DIR + 'item_' + groupName + '.json') as data_file:
item_groups = json.load(data_file)
# Take out unknown group
unknown = next(iter([g for g in _groups if not g['value']]), False)
other = {
'count': 0,
'items': []
}
# Add items to appropriate groups
for i,g in enumerate(_groups):
if g['value']:
item_ids = getItemsIds(g, item_groups)
# this group is too small; add to "other" group
if g['count'] < GROUP_ITEM_THRESHOLD and len(_groups) > GROUP_THRESHOLD:
other['items'].extend(item_ids)
other['count'] += g['count']
else:
g['items'] = item_ids
groups.append(g)
# Add "other" group
if other['count']:
groups.append(other)
# Add "uknown" group
if unknown:
unknown['items'] = getItemsIds(unknown, item_groups)
groups.append(unknown)
return groups
for groupName in coords:
item_coords = []
groups = getGroups(groupName)
for g in groups:
for itemId in g['items']:
item_coords.append(itemId)
# determine extra blank rows
rows = int(math.ceil(1.0 * g['count'] / ITEMS_PER_ROW))
extra_rows = max(MIN_GROUP_ROWS - rows, 0)
# determine extra blank items in last row
extra_items_in_last_row = rows * ITEMS_PER_ROW - g['count']
blanks = extra_rows * ITEMS_PER_ROW + extra_items_in_last_row
# fill the blanks with placeholders
for blank in range(blanks):
item_coords.append(-1)
coords[groupName] = item_coords
# Write out data
with open(OUTPUT_FILE, 'w') as outfile:
json.dump(coords, outfile)
print "Wrote coords to " + OUTPUT_FILE
| cc0-1.0 |
cloudbase/nova | nova/tests/unit/api/openstack/compute/test_flavor_manage.py | 4 | 17133 | # Copyright 2011 Andrew Bogott for the Wikimedia Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import mock
from oslo_serialization import jsonutils
import six
import webob
from nova.api.openstack.compute import flavor_access as flavor_access_v21
from nova.api.openstack.compute import flavor_manage as flavormanage_v21
from nova.compute import flavors
from nova import db
from nova import exception
from nova import test
from nova.tests.unit.api.openstack import fakes
def fake_db_flavor(**updates):
db_flavor = {
'root_gb': 1,
'ephemeral_gb': 1,
'name': u'frob',
'deleted': False,
'created_at': datetime.datetime(2012, 1, 19, 18, 49, 30, 877329),
'updated_at': None,
'memory_mb': 256,
'vcpus': 1,
'flavorid': 1,
'swap': 0,
'rxtx_factor': 1.0,
'extra_specs': {},
'deleted_at': None,
'vcpu_weight': None,
'id': 7,
'is_public': True,
'disabled': False,
}
if updates:
db_flavor.update(updates)
return db_flavor
def fake_create(newflavor):
newflavor['flavorid'] = 1234
newflavor["name"] = 'test'
newflavor["memory_mb"] = 512
newflavor["vcpus"] = 2
newflavor["root_gb"] = 1
newflavor["ephemeral_gb"] = 1
newflavor["swap"] = 512
newflavor["rxtx_factor"] = 1.0
newflavor["is_public"] = True
newflavor["disabled"] = False
class FlavorManageTestV21(test.NoDBTestCase):
controller = flavormanage_v21.FlavorManageController()
validation_error = exception.ValidationError
base_url = '/v2/fake/flavors'
def setUp(self):
super(FlavorManageTestV21, self).setUp()
self.stub_out("nova.objects.Flavor.create", fake_create)
self.request_body = {
"flavor": {
"name": "test",
"ram": 512,
"vcpus": 2,
"disk": 1,
"OS-FLV-EXT-DATA:ephemeral": 1,
"id": six.text_type('1234'),
"swap": 512,
"rxtx_factor": 1,
"os-flavor-access:is_public": True,
}
}
self.expected_flavor = self.request_body
def _get_http_request(self, url=''):
return fakes.HTTPRequest.blank(url)
@property
def app(self):
return fakes.wsgi_app_v21(init_only=('os-flavor-manage',
'os-flavor-rxtx',
'os-flavor-access', 'flavors',
'os-flavor-extra-data'))
@mock.patch('nova.objects.Flavor.destroy')
def test_delete(self, mock_destroy):
res = self.controller._delete(self._get_http_request(), 1234)
# NOTE: on v2.1 the HTTP status code is set as the wsgi_code of the API
# method instead of status_int on the response object.
if isinstance(self.controller,
flavormanage_v21.FlavorManageController):
status_int = self.controller._delete.wsgi_code
else:
status_int = res.status_int
self.assertEqual(202, status_int)
# subsequent delete should fail
mock_destroy.side_effect = exception.FlavorNotFound(flavor_id=1234)
self.assertRaises(webob.exc.HTTPNotFound,
self.controller._delete, self._get_http_request(),
1234)
def _test_create_missing_parameter(self, parameter):
body = {
"flavor": {
"name": "azAZ09. -_",
"ram": 512,
"vcpus": 2,
"disk": 1,
"OS-FLV-EXT-DATA:ephemeral": 1,
"id": six.text_type('1234'),
"swap": 512,
"rxtx_factor": 1,
"os-flavor-access:is_public": True,
}
}
del body['flavor'][parameter]
self.assertRaises(self.validation_error, self.controller._create,
self._get_http_request(), body=body)
def test_create_missing_name(self):
self._test_create_missing_parameter('name')
def test_create_missing_ram(self):
self._test_create_missing_parameter('ram')
def test_create_missing_vcpus(self):
self._test_create_missing_parameter('vcpus')
def test_create_missing_disk(self):
self._test_create_missing_parameter('disk')
def _create_flavor_success_case(self, body, req=None):
req = req if req else self._get_http_request(url=self.base_url)
req.headers['Content-Type'] = 'application/json'
req.method = 'POST'
req.body = jsonutils.dump_as_bytes(body)
res = req.get_response(self.app)
self.assertEqual(200, res.status_code)
return jsonutils.loads(res.body)
def test_create(self):
body = self._create_flavor_success_case(self.request_body)
for key in self.expected_flavor["flavor"]:
self.assertEqual(body["flavor"][key],
self.expected_flavor["flavor"][key])
def test_create_public_default(self):
del self.request_body['flavor']['os-flavor-access:is_public']
body = self._create_flavor_success_case(self.request_body)
for key in self.expected_flavor["flavor"]:
self.assertEqual(body["flavor"][key],
self.expected_flavor["flavor"][key])
def test_create_without_flavorid(self):
del self.request_body['flavor']['id']
body = self._create_flavor_success_case(self.request_body)
for key in self.expected_flavor["flavor"]:
self.assertEqual(body["flavor"][key],
self.expected_flavor["flavor"][key])
def _create_flavor_bad_request_case(self, body):
self.stubs.UnsetAll()
self.assertRaises(self.validation_error, self.controller._create,
self._get_http_request(), body=body)
def test_create_invalid_name(self):
self.request_body['flavor']['name'] = 'bad !@#!$%\x00 name'
self._create_flavor_bad_request_case(self.request_body)
def test_create_flavor_name_is_whitespace(self):
self.request_body['flavor']['name'] = ' '
self._create_flavor_bad_request_case(self.request_body)
def test_create_with_name_too_long(self):
self.request_body['flavor']['name'] = 'a' * 256
self._create_flavor_bad_request_case(self.request_body)
def test_create_with_name_leading_trailing_spaces(self):
self.request_body['flavor']['name'] = ' test '
self._create_flavor_bad_request_case(self.request_body)
def test_create_with_name_leading_trailing_spaces_compat_mode(self):
req = self._get_http_request(url=self.base_url)
req.set_legacy_v2()
self.request_body['flavor']['name'] = ' test '
body = self._create_flavor_success_case(self.request_body, req)
self.assertEqual('test', body['flavor']['name'])
def test_create_without_flavorname(self):
del self.request_body['flavor']['name']
self._create_flavor_bad_request_case(self.request_body)
def test_create_empty_body(self):
body = {
"flavor": {}
}
self._create_flavor_bad_request_case(body)
def test_create_no_body(self):
body = {}
self._create_flavor_bad_request_case(body)
def test_create_invalid_format_body(self):
body = {
"flavor": []
}
self._create_flavor_bad_request_case(body)
def test_create_invalid_flavorid(self):
self.request_body['flavor']['id'] = "!@#!$#!$^#&^$&"
self._create_flavor_bad_request_case(self.request_body)
def test_create_check_flavor_id_length(self):
MAX_LENGTH = 255
self.request_body['flavor']['id'] = "a" * (MAX_LENGTH + 1)
self._create_flavor_bad_request_case(self.request_body)
def test_create_with_leading_trailing_whitespaces_in_flavor_id(self):
self.request_body['flavor']['id'] = " bad_id "
self._create_flavor_bad_request_case(self.request_body)
def test_create_without_ram(self):
del self.request_body['flavor']['ram']
self._create_flavor_bad_request_case(self.request_body)
def test_create_with_0_ram(self):
self.request_body['flavor']['ram'] = 0
self._create_flavor_bad_request_case(self.request_body)
def test_create_with_ram_exceed_max_limit(self):
self.request_body['flavor']['ram'] = db.MAX_INT + 1
self._create_flavor_bad_request_case(self.request_body)
def test_create_without_vcpus(self):
del self.request_body['flavor']['vcpus']
self._create_flavor_bad_request_case(self.request_body)
def test_create_with_0_vcpus(self):
self.request_body['flavor']['vcpus'] = 0
self._create_flavor_bad_request_case(self.request_body)
def test_create_with_vcpus_exceed_max_limit(self):
self.request_body['flavor']['vcpus'] = db.MAX_INT + 1
self._create_flavor_bad_request_case(self.request_body)
def test_create_without_disk(self):
del self.request_body['flavor']['disk']
self._create_flavor_bad_request_case(self.request_body)
def test_create_with_minus_disk(self):
self.request_body['flavor']['disk'] = -1
self._create_flavor_bad_request_case(self.request_body)
def test_create_with_disk_exceed_max_limit(self):
self.request_body['flavor']['disk'] = db.MAX_INT + 1
self._create_flavor_bad_request_case(self.request_body)
def test_create_with_minus_ephemeral(self):
self.request_body['flavor']['OS-FLV-EXT-DATA:ephemeral'] = -1
self._create_flavor_bad_request_case(self.request_body)
def test_create_with_ephemeral_exceed_max_limit(self):
self.request_body['flavor'][
'OS-FLV-EXT-DATA:ephemeral'] = db.MAX_INT + 1
self._create_flavor_bad_request_case(self.request_body)
def test_create_with_minus_swap(self):
self.request_body['flavor']['swap'] = -1
self._create_flavor_bad_request_case(self.request_body)
def test_create_with_swap_exceed_max_limit(self):
self.request_body['flavor']['swap'] = db.MAX_INT + 1
self._create_flavor_bad_request_case(self.request_body)
def test_create_with_minus_rxtx_factor(self):
self.request_body['flavor']['rxtx_factor'] = -1
self._create_flavor_bad_request_case(self.request_body)
def test_create_with_rxtx_factor_exceed_max_limit(self):
self.request_body['flavor']['rxtx_factor'] = db.SQL_SP_FLOAT_MAX * 2
self._create_flavor_bad_request_case(self.request_body)
def test_create_with_non_boolean_is_public(self):
self.request_body['flavor']['os-flavor-access:is_public'] = 123
self._create_flavor_bad_request_case(self.request_body)
def test_flavor_exists_exception_returns_409(self):
expected = {
"flavor": {
"name": "test",
"ram": 512,
"vcpus": 2,
"disk": 1,
"OS-FLV-EXT-DATA:ephemeral": 1,
"id": 1235,
"swap": 512,
"rxtx_factor": 1,
"os-flavor-access:is_public": True,
}
}
def fake_create(name, memory_mb, vcpus, root_gb, ephemeral_gb,
flavorid, swap, rxtx_factor, is_public):
raise exception.FlavorExists(name=name)
self.stubs.Set(flavors, "create", fake_create)
self.assertRaises(webob.exc.HTTPConflict, self.controller._create,
self._get_http_request(), body=expected)
def test_invalid_memory_mb(self):
"""Check negative and decimal number can't be accepted."""
self.stubs.UnsetAll()
self.assertRaises(exception.InvalidInput, flavors.create, "abc",
-512, 2, 1, 1, 1234, 512, 1, True)
self.assertRaises(exception.InvalidInput, flavors.create, "abcd",
512.2, 2, 1, 1, 1234, 512, 1, True)
self.assertRaises(exception.InvalidInput, flavors.create, "abcde",
None, 2, 1, 1, 1234, 512, 1, True)
self.assertRaises(exception.InvalidInput, flavors.create, "abcdef",
512, 2, None, 1, 1234, 512, 1, True)
self.assertRaises(exception.InvalidInput, flavors.create, "abcdef",
"test_memory_mb", 2, None, 1, 1234, 512, 1, True)
class PrivateFlavorManageTestV21(test.TestCase):
controller = flavormanage_v21.FlavorManageController()
base_url = '/v2/fake/flavors'
def setUp(self):
super(PrivateFlavorManageTestV21, self).setUp()
self.flavor_access_controller = (flavor_access_v21.
FlavorAccessController())
self.expected = {
"flavor": {
"name": "test",
"ram": 512,
"vcpus": 2,
"disk": 1,
"OS-FLV-EXT-DATA:ephemeral": 1,
"swap": 512,
"rxtx_factor": 1
}
}
@property
def app(self):
return fakes.wsgi_app_v21(init_only=('os-flavor-manage',
'os-flavor-access',
'os-flavor-rxtx', 'flavors',
'os-flavor-extra-data'),
fake_auth_context=self._get_http_request().
environ['nova.context'])
def _get_http_request(self, url=''):
return fakes.HTTPRequest.blank(url)
def _get_response(self):
req = self._get_http_request(self.base_url)
req.headers['Content-Type'] = 'application/json'
req.method = 'POST'
req.body = jsonutils.dump_as_bytes(self.expected)
res = req.get_response(self.app)
return jsonutils.loads(res.body)
def test_create_private_flavor_should_not_grant_flavor_access(self):
self.expected["flavor"]["os-flavor-access:is_public"] = False
body = self._get_response()
for key in self.expected["flavor"]:
self.assertEqual(body["flavor"][key], self.expected["flavor"][key])
# A normal user can't access a non-public flavor without being granted
# access, so an admin context is needed here.
flavor_access_body = self.flavor_access_controller.index(
fakes.HTTPRequest.blank('', use_admin_context=True),
body["flavor"]["id"])
expected_flavor_access_body = {
"tenant_id": 'fake',
"flavor_id": "%s" % body["flavor"]["id"]
}
self.assertNotIn(expected_flavor_access_body,
flavor_access_body["flavor_access"])
def test_create_public_flavor_should_not_create_flavor_access(self):
self.expected["flavor"]["os-flavor-access:is_public"] = True
body = self._get_response()
for key in self.expected["flavor"]:
self.assertEqual(body["flavor"][key], self.expected["flavor"][key])
class FlavorManagerPolicyEnforcementV21(test.NoDBTestCase):
def setUp(self):
super(FlavorManagerPolicyEnforcementV21, self).setUp()
self.controller = flavormanage_v21.FlavorManageController()
def test_create_policy_failed(self):
rule_name = "os_compute_api:os-flavor-manage"
self.policy.set_rules({rule_name: "project:non_fake"})
req = fakes.HTTPRequest.blank('')
exc = self.assertRaises(
exception.PolicyNotAuthorized,
self.controller._create, req,
body={"flavor": {
"name": "test",
"ram": 512,
"vcpus": 2,
"disk": 1,
"swap": 512,
"rxtx_factor": 1,
}})
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
def test_delete_policy_failed(self):
rule_name = "os_compute_api:os-flavor-manage"
self.policy.set_rules({rule_name: "project:non_fake"})
req = fakes.HTTPRequest.blank('')
exc = self.assertRaises(
exception.PolicyNotAuthorized,
self.controller._delete, req,
fakes.FAKE_UUID)
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
| apache-2.0 |
user-none/calibre | src/calibre/ebooks/mobi/debug/mobi8.py | 14 | 13718 | #!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
from future_builtins import map
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <[email protected]>'
__docformat__ = 'restructuredtext en'
import sys, os, struct, textwrap
from itertools import izip
from calibre import CurrentDir
from calibre.ebooks.mobi.debug.containers import ContainerHeader
from calibre.ebooks.mobi.debug.headers import TextRecord
from calibre.ebooks.mobi.debug.index import (SKELIndex, SECTIndex, NCXIndex,
GuideIndex)
from calibre.ebooks.mobi.utils import read_font_record, decode_tbs, RECORD_SIZE
from calibre.ebooks.mobi.debug import format_bytes
from calibre.ebooks.mobi.reader.headers import NULL_INDEX
from calibre.utils.imghdr import what
class FDST(object):
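# An FDST record describes how the raw KF8 text splits into flows: a 12-byte
# header (b'FDST' magic, offset to the section table, section count) followed
# by one (start, end) pair of uint32 offsets per flow section.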
def __init__(self, raw):
if raw[:4] != b'FDST':
raise ValueError('KF8 does not have a valid FDST record')
self.sec_off, self.num_sections = struct.unpack_from(b'>LL', raw, 4)
if self.sec_off != 12:
raise ValueError('FDST record has unknown extra fields')
secf = b'>%dL' % (self.num_sections*2)
secs = struct.unpack_from(secf, raw, self.sec_off)
rest = raw[self.sec_off+struct.calcsize(secf):]
if rest:
raise ValueError('FDST record has trailing data: '
'%s'%format_bytes(rest))
self.sections = tuple(izip(secs[::2], secs[1::2]))
def __str__(self):
ans = ['FDST record']
a = lambda k, v:ans.append('%s: %s'%(k, v))
a('Offset to sections', self.sec_off)
a('Number of section records', self.num_sections)
ans.append('**** %d Sections ****'% len(self.sections))
for sec in self.sections:
ans.append('Start: %20d End: %d'%sec)
return '\n'.join(ans)
class File(object):
def __init__(self, skel, skeleton, text, first_aid, sections):
self.name = 'part%04d'%skel.file_number
self.skeleton, self.text, self.first_aid = skeleton, text, first_aid
self.sections = sections
def dump(self, ddir):
with open(os.path.join(ddir, self.name + '.html'), 'wb') as f:
f.write(self.text)
base = os.path.join(ddir, self.name + '-parts')
os.mkdir(base)
with CurrentDir(base):
with open('skeleton.html', 'wb') as f:
f.write(self.skeleton)
for i, text in enumerate(self.sections):
with open('sect-%04d.html'%i, 'wb') as f:
f.write(text)
class MOBIFile(object):
def __init__(self, mf):
self.mf = mf
h, h8 = mf.mobi_header, mf.mobi8_header
first_text_record = 1
offset = 0
self.resource_ranges = [(h8.first_resource_record, h8.last_resource_record, h8.first_image_index)]
if mf.kf8_type == 'joint':
offset = h.exth.kf8_header_index
self.resource_ranges.insert(0, (h.first_resource_record, h.last_resource_record, h.first_image_index))
self.text_records = [TextRecord(i, r, h8.extra_data_flags,
mf.decompress8) for i, r in
enumerate(mf.records[first_text_record+offset:
first_text_record+offset+h8.number_of_text_records])]
self.raw_text = b''.join(r.raw for r in self.text_records)
self.header = self.mf.mobi8_header
self.extract_resources(mf.records)
self.read_fdst()
self.read_indices()
self.build_files()
self.read_tbs()
def print_header(self, f=sys.stdout):
print (str(self.mf.palmdb).encode('utf-8'), file=f)
print (file=f)
print ('Record headers:', file=f)
for i, r in enumerate(self.mf.records):
print ('%6d. %s'%(i, r.header), file=f)
print (file=f)
print (str(self.mf.mobi8_header).encode('utf-8'), file=f)
def read_fdst(self):
self.fdst = None
if self.header.fdst_idx != NULL_INDEX:
idx = self.header.fdst_idx
self.fdst = FDST(self.mf.records[idx].raw)
if self.fdst.num_sections != self.header.fdst_count:
raise ValueError('KF8 Header contains invalid FDST count')
def read_indices(self):
self.skel_index = SKELIndex(self.header.skel_idx, self.mf.records,
self.header.encoding)
self.sect_index = SECTIndex(self.header.sect_idx, self.mf.records,
self.header.encoding)
self.ncx_index = NCXIndex(self.header.primary_index_record,
self.mf.records, self.header.encoding)
self.guide_index = GuideIndex(self.header.oth_idx, self.mf.records,
self.header.encoding)
def build_files(self):
text = self.raw_text
self.files = []
for skel in self.skel_index.records:
sects = [x for x in self.sect_index.records if x.file_number == skel.file_number]
skeleton = text[skel.start_position:skel.start_position+skel.length]
ftext = skeleton
first_aid = sects[0].toc_text
sections = []
for sect in sects:
start_pos = skel.start_position + skel.length + sect.start_pos
sect_text = text[start_pos:start_pos+sect.length]
insert_pos = sect.insert_pos - skel.start_position
ftext = ftext[:insert_pos] + sect_text + ftext[insert_pos:]
sections.append(sect_text)
self.files.append(File(skel, skeleton, ftext, first_aid, sections))
def dump_flows(self, ddir):
boundaries = [(0, len(self.raw_text))]
if self.fdst is not None:
boundaries = self.fdst.sections
for i, x in enumerate(boundaries):
start, end = x
raw = self.raw_text[start:end]
with open(os.path.join(ddir, 'flow%04d.txt'%i), 'wb') as f:
f.write(raw)
def extract_resources(self, records):
self.resource_map = []
self.containers = []
known_types = {b'FLIS', b'FCIS', b'SRCS',
b'\xe9\x8e\r\n', b'RESC', b'BOUN', b'FDST', b'DATP',
b'AUDI', b'VIDE', b'CRES', b'CONT', b'CMET', b'PAGE'}
container = None
for i, rec in enumerate(records):
for (l, r, offset) in self.resource_ranges:
if l <= i <= r:
resource_index = i + 1
if offset is not None and resource_index >= offset:
resource_index -= offset
break
else:
continue
sig = rec.raw[:4]
payload = rec.raw
ext = 'dat'
prefix = 'binary'
suffix = ''
if sig in {b'HUFF', b'CDIC', b'INDX'}:
continue
# TODO: Ignore CNCX records as well
if sig == b'FONT':
font = read_font_record(rec.raw)
if font['err']:
raise ValueError('Failed to read font record: %s Headers: %s'%(
font['err'], font['headers']))
payload = (font['font_data'] if font['font_data'] else
font['raw_data'])
prefix, ext = 'fonts', font['ext']
elif sig == b'CONT':
if payload == b'CONTBOUNDARY':
self.containers.append(container)
container = None
continue
container = ContainerHeader(payload)
elif sig == b'CRES':
container.resources.append(payload)
if container.is_image_container:
payload = payload[12:]
q = what(None, payload)
if q:
prefix, ext = 'hd-images', q
resource_index = len(container.resources)
elif sig == b'\xa0\xa0\xa0\xa0' and len(payload) == 4:
if container is None:
print ('Found an end of container record with no container, ignoring')
else:
container.resources.append(None)
continue
elif sig not in known_types:
if container is not None and len(container.resources) == container.num_of_resource_records:
container.add_hrefs(payload)
continue
q = what(None, rec.raw)
if q:
prefix, ext = 'images', q
if prefix == 'binary':
if sig == b'\xe9\x8e\r\n':
suffix = '-EOF'
elif sig in known_types:
suffix = '-' + sig.decode('ascii')
self.resource_map.append(('%s/%06d%s.%s'%(prefix, resource_index, suffix, ext),
payload))
def read_tbs(self):
from calibre.ebooks.mobi.writer8.tbs import (Entry, DOC,
collect_indexing_data, encode_strands_as_sequences,
sequences_to_bytes, calculate_all_tbs, NegativeStrandIndex)
entry_map = []
for index in self.ncx_index:
vals = list(index)[:-1] + [None, None, None, None]
entry_map.append(Entry(*(vals[:12])))
indexing_data = collect_indexing_data(entry_map, list(map(len,
self.text_records)))
self.indexing_data = [DOC + '\n' +textwrap.dedent('''\
Index Entry lines are of the form:
depth:index_number [action] parent (index_num-parent) Geometry
Where Geometry is the start and end of the index entry w.r.t
the start of the text record.
''')]
tbs_type = 8
try:
calculate_all_tbs(indexing_data)
except NegativeStrandIndex:
calculate_all_tbs(indexing_data, tbs_type=5)
tbs_type = 5
for i, strands in enumerate(indexing_data):
rec = self.text_records[i]
tbs_bytes = rec.trailing_data.get('indexing', b'')
desc = ['Record #%d'%i]
for s, strand in enumerate(strands):
desc.append('Strand %d'%s)
for entries in strand.itervalues():
for e in entries:
desc.append(
' %s%d [%-9s] parent: %s (%d) Geometry: (%d, %d)'%(
e.depth * (' ') + '- ', e.index, e.action, e.parent,
e.index-(e.parent or 0), e.start-i*RECORD_SIZE,
e.start+e.length-i*RECORD_SIZE))
desc.append('TBS Bytes: ' + format_bytes(tbs_bytes))
flag_sz = 3
sequences = []
otbs = tbs_bytes
while tbs_bytes:
try:
val, extra, consumed = decode_tbs(tbs_bytes, flag_size=flag_sz)
except:
break
flag_sz = 4
tbs_bytes = tbs_bytes[consumed:]
extra = {bin(k):v for k, v in extra.iteritems()}
sequences.append((val, extra))
for j, seq in enumerate(sequences):
desc.append('Sequence #%d: %r %r'%(j, seq[0], seq[1]))
if tbs_bytes:
desc.append('Remaining bytes: %s'%format_bytes(tbs_bytes))
calculated_sequences = encode_strands_as_sequences(strands,
tbs_type=tbs_type)
try:
calculated_bytes = sequences_to_bytes(calculated_sequences)
except:
calculated_bytes = b'failed to calculate tbs bytes'
if calculated_bytes != otbs:
print ('WARNING: TBS mismatch for record %d'%i)
desc.append('WARNING: TBS mismatch!')
desc.append('Calculated sequences: %r'%calculated_sequences)
desc.append('')
self.indexing_data.append('\n'.join(desc))
def inspect_mobi(mobi_file, ddir):
f = MOBIFile(mobi_file)
with open(os.path.join(ddir, 'header.txt'), 'wb') as out:
f.print_header(f=out)
alltext = os.path.join(ddir, 'raw_text.html')
with open(alltext, 'wb') as of:
of.write(f.raw_text)
for x in ('text_records', 'images', 'fonts', 'binary', 'files', 'flows', 'hd-images',):
os.mkdir(os.path.join(ddir, x))
for rec in f.text_records:
rec.dump(os.path.join(ddir, 'text_records'))
for href, payload in f.resource_map:
with open(os.path.join(ddir, href), 'wb') as fo:
fo.write(payload)
for i, container in enumerate(f.containers):
with open(os.path.join(ddir, 'container%d.txt' % (i + 1)), 'wb') as cf:
cf.write(str(container).encode('utf-8'))
if f.fdst:
with open(os.path.join(ddir, 'fdst.record'), 'wb') as fo:
fo.write(str(f.fdst).encode('utf-8'))
with open(os.path.join(ddir, 'skel.record'), 'wb') as fo:
fo.write(str(f.skel_index).encode('utf-8'))
with open(os.path.join(ddir, 'chunks.record'), 'wb') as fo:
fo.write(str(f.sect_index).encode('utf-8'))
with open(os.path.join(ddir, 'ncx.record'), 'wb') as fo:
fo.write(str(f.ncx_index).encode('utf-8'))
with open(os.path.join(ddir, 'guide.record'), 'wb') as fo:
fo.write(str(f.guide_index).encode('utf-8'))
with open(os.path.join(ddir, 'tbs.txt'), 'wb') as fo:
fo.write(('\n'.join(f.indexing_data)).encode('utf-8'))
for part in f.files:
part.dump(os.path.join(ddir, 'files'))
f.dump_flows(os.path.join(ddir, 'flows'))
| gpl-3.0 |
Bluetide/Cactus | cactus/deployment/engine.py | 9 | 4330 | #coding:utf-8
import os
import logging
from cactus.deployment.file import BaseFile
from cactus.utils.filesystem import fileList
from cactus.utils.helpers import get_or_prompt, memoize
from cactus.utils.parallel import multiMap, PARALLEL_DISABLED
logger = logging.getLogger(__name__)
class BaseDeploymentEngine(object):
FileClass = BaseFile
CredentialsManagerClass = None #TODO: Define interface?
config_bucket_name = None
config_bucket_website = None
_index_page = "index.html"
_error_page = "error.html"
_connection = None
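# Contract for concrete engines (e.g. the bundled Amazon S3 backend): set
# FileClass, CredentialsManagerClass and the config_bucket_* keys, and
# implement _create_connection(), get_bucket(), create_bucket() and
# get_website_endpoint().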
def __init__(self, site):
"""
:param site: An instance of cactus.site.Site
"""
self.site = site
self.credentials_manager = self.CredentialsManagerClass(self)
def deploy(self):
self.configure()
# Upload all files concurrently in a thread pool
mapper = multiMap
if self.site._parallel <= PARALLEL_DISABLED:
mapper = map
totalFiles = mapper(lambda p: p.upload(), self.files())
return totalFiles
def _ignore_file(self, path):
if os.path.basename(path).startswith("."):
return True
# Special case for Finder Icon files
if "\r" in os.path.basename(path):
return True
return False
@memoize
def files(self):
"""
List of build files.
"""
return [self.FileClass(self, file_path) for file_path in fileList(
self.site.build_path, relative=True) if self._ignore_file(file_path) is False]
def total_bytes(self):
"""
Total size of files to be uploaded
"""
return sum([f.total_bytes for f in self.files()])
def total_bytes_uploaded(self):
"""
Total size of files to be uploaded
"""
return sum([f.total_bytes_uploaded for f in self.files()])
def progress(self):
"""
Progress of upload in percentage
"""
total_bytes = float(self.total_bytes())
total_bytes_uploaded = float(self.total_bytes_uploaded())
if total_bytes == 0 or total_bytes_uploaded == 0:
return 0.0
return total_bytes_uploaded / total_bytes
def get_connection(self):
if self._connection is None:
self._connection = self._create_connection()
return self._connection
def _create_connection(self):
"""
Should return a Connection object
"""
raise NotImplementedError()
def get_bucket(self):
"""
Should return a Bucket object, None if the bucket does not exist.
"""
raise NotImplementedError()
def create_bucket(self):
"""
Should create and return a Bucket object.
"""
raise NotImplementedError()
def get_website_endpoint(self):
"""
Should return the Website endpoint for the bucket.
"""
#TODO: Normalize -- rackspace gives an URL, but Amazon gives a domain name
raise NotImplementedError()
def configure(self):
"""
This is when the DeploymentEngine should configure itself to prepare for deployment
:rtype: None
"""
self.bucket_name = get_or_prompt(self.site.config, self.config_bucket_name, self.site.ui.prompt_normalized,
"Enter the bucket name (e.g.: www.example.com)")
#TODO: Validate this is not empty
self.bucket = self.get_bucket() #TODO: Catch auth errors
#TODO: Make this all integrated and consistent!
created = False
if self.bucket is None:
if self.site.ui.prompt_yes_no("Bucket does not exist. Create it?"):
self.bucket = self.create_bucket()
created = True
else:
return
website_endpoint = self.get_website_endpoint()
self.site.config.set(self.config_bucket_website, website_endpoint)
self.site.config.write()
self.credentials_manager.save_credentials()
if created:
logger.info('Bucket %s was created with website endpoint %s', self.bucket_name, website_endpoint)
logger.info("Bucket Name: %s", self.bucket_name)
logger.info("Bucket Web Endpoint: %s", website_endpoint)
| bsd-3-clause |
disabler/isida3 | scripts/update3_0to3_1.py | 2 | 6918 | import os, sys
global clean, rename, cur, conn
os.chdir('..')
settings_folder = 'settings/%s'
conference_config_path = settings_folder % 'conference.config'
owner_config_path = settings_folder % 'owner.config'
configname = settings_folder % 'config.py'
topbase_path = settings_folder % 'topbase.db'
aliases_path = settings_folder % 'aliases'
blacklist_path = settings_folder % 'blacklist.db'
conoff_path = settings_folder % 'commonoff'
conf_path = settings_folder % 'conf'
feed_path = settings_folder % 'feed'
hidenrooms_path = settings_folder % 'hidenroom.db'
botignore_path = settings_folder % 'ignore'
botowner_path = settings_folder % 'owner'
locale_path = settings_folder % 'locale'
logroom_path = settings_folder % 'logroom.db'
spy_path = settings_folder % 'spy.db'
tban_path = settings_folder % 'temporary.ban'
saytoowner_path = settings_folder % 'saytoowner.db'
ignoreban_path = settings_folder % 'ignoreban.db'
ar = {'--help':'This page',
'--all':'Import all files',
'--owner-config':'Import owners config',
'--conference-config':'Import conference config',
'--top':'Import statistics for `top` command',
'--alias':'Import aliases',
'--blacklist':'Import blacklist of rooms',
'--comm':'Import list of disabled commands',
'--rooms':'Import list of bot\'s rooms',
'--rss':'Import RSS/ATOM feeds',
'--hiden-rooms':'Import list of hiden rooms',
'--bot-ignore':'Import patterns for bot ignore',
'--locale':'Import current locale name',
'--logs':'Import rooms with enabled logs',
'--bot-owner':'Import list of bot owners',
'--spy':'Import spy for room activity',
'--tban':'Import list of temporary bans',
'--say-to-owner':'Import base for `msgtoadmin` command',
'--ignore-ban':'Import list of rooms with ignore global ban',
'--clean':'Remove source file(s) after import',
'--rename':'Rename source file(s) after import'
}
def readfile(filename):
fp = file(filename)
data = fp.read()
fp.close()
return data
def errorHandler(text):
print text
sys.exit()
def mv(file):
newfile = file.split('/')
newfile = '/'.join(newfile[:-1]+['_%s' % newfile[-1]])
os.system('mv %s %s' % (file,newfile))
def remove(file):
try: os.remove(file)
except: pass
def import_file(filename,basename,eval_for,eval_string):
try:
print 'Import: %s' % filename
c = eval(readfile(filename))
cnt = 0
for t in eval(eval_for):
r = eval(eval_string)
req = 'insert into %s values(%s);' % (basename,','.join(['%'+a for a in 's'*len(r)]))
cnt += 1
cur.execute(req,r)
print '\t%s record(s)' % cnt
conn.commit()
if rename: mv(filename)
elif clean: remove(filename)
except Exception, SM: print 'Error [%s] while processing file %s' % (str(SM),filename)
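# import_file() eval()s both the settings file and the eval_for/eval_string
# snippets, with c bound to the parsed file and t to the loop variable, so it
# must only be run on trusted settings directories.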
print 'Updater for Isida Jabber Bot from 3.0 to 3.1'
print '(c) Disabler Production Lab.'
base_charset, base_type = 'utf8', 'pgsql'
if os.path.isfile(configname): execfile(configname)
else: errorHandler('%s is missing.' % configname)
try: _,_,_,_,_ = base_name,base_user,base_host,base_pass,base_port
except: errorHandler('Missing settings for SQL DB!')
arg = sys.argv[1:]
if arg:
clean = '--clean' in arg
rename = '--rename' in arg
all = '--all' in arg
if '--help' in arg:
ark = ar.keys()
ark.sort()
print 'Usage:\n%s' % '\n'.join(['%-32s%s' % (t,ar[t]) for t in ark])
else:
for t in arg:
if t not in ar.keys():
print 'Unknown option: %s\nUse `--help` option' % t
os._exit(0)
if base_type == 'pgsql':
import psycopg2
conn = psycopg2.connect(database=base_name, user=base_user, host=base_host, password=base_pass, port=base_port)
elif base_type == 'mysql':
import MySQLdb
conn = MySQLdb.connect(db=base_name, user=base_user, host=base_host, passwd=base_pass, port=int(base_port), charset=base_charset)
else: errorHandler('Unknown database backend!')
print 'Base type: %s' % base_type
cur = conn.cursor()
# --------- Conference config --------- #
if '--conference-config' in arg or all:
try:
print 'Import: %s' % conference_config_path
c = eval(readfile(conference_config_path))
to_base,cnt = 'config_conf',0
for t in c.keys():
for tmp in c[t].keys():
r = (t,tmp,unicode(c[t][tmp]))
req = 'insert into %s values(%s);' % (to_base,','.join(['%'+a for a in 's'*len(r)]))
cnt += 1
cur.execute(req,r)
print '\t%s record(s)' % cnt
conn.commit()
if rename: mv(conference_config_path)
elif clean: remove(conference_config_path)
 except Exception, SM: print 'Error [%s] while processing file %s' % (str(SM),conference_config_path)
# --------- Owner config --------- #
if '--owner-config' in arg or all: import_file(owner_config_path,'config_owner','c.keys()','(t,unicode(c[t]))')
# --------- Top command --------- #
if '--top' in arg or all: import_file(topbase_path,'top','c','t')
# --------- Aliases --------- #
if '--alias' in arg or all: import_file(aliases_path,'alias','c','t')
# --------- Blacklist of rooms --------- #
if '--blacklist' in arg or all: import_file(blacklist_path,'blacklist','c','(t,)')
# --------- Comm ON/OFF --------- #
if '--comm' in arg or all: import_file(conoff_path,'commonoff','c','t')
# --------- Conferences list --------- #
if '--rooms' in arg or all: import_file(conf_path,'conference','c',"list(t.split('\\n',1)+[''])[:2]")
# --------- Feeds --------- #
if '--rss' in arg or all: import_file(feed_path,'feed','c','t')
# --------- Hiden rooms --------- #
if '--hiden-rooms' in arg or all: import_file(hidenrooms_path,'hiden_rooms','c','(t,)')
# --------- Bot Ignore --------- #
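# Py2 conditional via list indexing: bare patterns get SQL '%' wildcards, full JIDs (containing '@') are stored as-is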
if '--bot-ignore' in arg or all: import_file(botignore_path,'bot_ignore','c',"[('%%%s%%' % t,),(t,)]['@' in t]")
# --------- Bot Locale --------- #
if '--locale' in arg or all: import_file(locale_path,'config_owner','range(0,1)',"('bot_locale',c)")
# --------- Logs in rooms --------- #
if '--logs' in arg or all: import_file(logroom_path,'log_rooms','c','(t,)')
# --------- Bot owners --------- #
if '--bot-owner' in arg or all: import_file(botowner_path,'bot_owner','c','(t,)')
# --------- Spy for rooms activity --------- #
if '--spy' in arg or all: import_file(spy_path,'spy','c','t')
# --------- Temporary ban --------- #
if '--tban' in arg or all: import_file(tban_path,'tmp_ban','c','t')
# --------- Say to owner --------- #
if '--say-to-owner' in arg or all: import_file(saytoowner_path,'saytoowner','c.keys()','(t,c[t])')
# --------- Ignore ban --------- #
if '--ignore-ban' in arg or all: import_file(ignoreban_path,'ignore_ban','c','(t,)')
print 'Finished!'
cur.close()
conn.commit()
conn.close()
else: print 'use `--help` option'
# The end is near!
| gpl-3.0 |
earshel/PokeyPyManager | POGOProtos/Networking/Requests/Messages/SfidaActionLogMessage_pb2.py | 15 | 1948 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: POGOProtos/Networking/Requests/Messages/SfidaActionLogMessage.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='POGOProtos/Networking/Requests/Messages/SfidaActionLogMessage.proto',
package='POGOProtos.Networking.Requests.Messages',
syntax='proto3',
serialized_pb=_b('\nCPOGOProtos/Networking/Requests/Messages/SfidaActionLogMessage.proto\x12\'POGOProtos.Networking.Requests.Messages\"\x17\n\x15SfidaActionLogMessageb\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_SFIDAACTIONLOGMESSAGE = _descriptor.Descriptor(
name='SfidaActionLogMessage',
full_name='POGOProtos.Networking.Requests.Messages.SfidaActionLogMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=112,
serialized_end=135,
)
DESCRIPTOR.message_types_by_name['SfidaActionLogMessage'] = _SFIDAACTIONLOGMESSAGE
SfidaActionLogMessage = _reflection.GeneratedProtocolMessageType('SfidaActionLogMessage', (_message.Message,), dict(
DESCRIPTOR = _SFIDAACTIONLOGMESSAGE,
__module__ = 'POGOProtos.Networking.Requests.Messages.SfidaActionLogMessage_pb2'
# @@protoc_insertion_point(class_scope:POGOProtos.Networking.Requests.Messages.SfidaActionLogMessage)
))
_sym_db.RegisterMessage(SfidaActionLogMessage)
# @@protoc_insertion_point(module_scope)
| mit |
Murali-group/GraphSpace | graphspace/settings/local.py | 1 | 1543 | from graphspace.settings.base import *
# Variables for the account through which GraphSpace sends email
EMAIL_HOST = 'NONE'
EMAIL_HOST_USER = 'NONE'
EMAIL_HOST_PASSWORD = 'NONE'
# If an error is thrown, display it in the browser (ONLY FOR LOCAL MACHINES)
DEBUG = True
TEMPLATE_DEBUG = True
MAINTENANCE = False
# URL through which to access graphspace
URL_PATH = "http://localhost:8000/"
# If tracking is enabled for GraphSpace in Google Analytics
GOOGLE_ANALYTICS_PROPERTY_ID = 'UA-00000000-0'
# Keys given by creating a requestor account on Amazon Mechanical Turk (https://www.mturk.com/mturk/welcome)
AWSACCESSKEYID = 'None'
SECRETKEY = 'None'
# Path to GraphSpace
PATH = "/Path_to_GraphSpace"
# SHOULD NEVER CHANGE THIS VALUE
SECRET_KEY = 'this-is-a-secret-key-for-local-settings-only'
# If needing to test on production mturk account (real money)
# AWS_URL = 'https://mechanicalturk.amazonaws.com'
# Sandbox (development) MTURK (fake money used)
AWS_URL = 'https://mechanicalturk.sandbox.amazonaws.com'
# To configure the application to use the Console Backend for sending e-mail. It writes e-mails to standard out instead of sending them.
# http://stackoverflow.com/questions/4642011/test-sending-email-without-email-server
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'test_database',
'USER': 'adb',
'PASSWORD': '',
'HOST': 'localhost',
'PORT': '5432'
}
}
| gpl-2.0 |
av8ramit/tensorflow | tensorflow/contrib/distributions/python/ops/distribution_util.py | 11 | 19545 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for probability distributions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import linalg
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import distribution as distribution_lib
from tensorflow.python.ops.distributions.util import * # pylint: disable=wildcard-import
def _convert_to_tensor(x, name):
return None if x is None else ops.convert_to_tensor(x, name=name)
def mixture_stddev(mixture_weight_vector, mean_vector, stddev_vector):
"""Computes the standard deviation of a mixture distribution.
This function works regardless of the component distribution, so long as
each component's mean and standard deviation can be provided.
Args:
mixture_weight_vector: A 2D tensor with shape [batch_size, num_components]
mean_vector: A 2D tensor of mixture component means. Has shape
`[batch_size, num_components]`.
stddev_vector: A 2D tensor of mixture component standard deviations. Has
shape `[batch_size, num_components]`.
Returns:
A 1D tensor of shape `[batch_size]` representing the standard deviation of
the mixture distribution with given weights and component means and standard
deviations.
Raises:
ValueError: If the shapes of the input tensors are not as expected.
"""
mixture_weight_vector.shape.assert_has_rank(2)
if not mean_vector.shape.is_compatible_with(mixture_weight_vector.shape):
raise ValueError("Expecting means to have same shape as mixture weights.")
if not stddev_vector.shape.is_compatible_with(mixture_weight_vector.shape):
raise ValueError("Expecting stddevs to have same shape as mixture weights.")
# Reshape the distribution parameters for batched vectorized dot products.
pi_for_dot_prod = array_ops.expand_dims(mixture_weight_vector, axis=1)
mu_for_dot_prod = array_ops.expand_dims(mean_vector, axis=2)
sigma_for_dot_prod = array_ops.expand_dims(stddev_vector, axis=2)
# weighted average of component means under mixture distribution.
mean_wa = math_ops.matmul(pi_for_dot_prod, mu_for_dot_prod)
mean_wa = array_ops.reshape(mean_wa, (-1,))
# weighted average of component variances under mixture distribution.
var_wa = math_ops.matmul(pi_for_dot_prod,
math_ops.square(sigma_for_dot_prod))
var_wa = array_ops.reshape(var_wa, (-1,))
# weighted average of component squared means under mixture distribution.
sq_mean_wa = math_ops.matmul(pi_for_dot_prod,
math_ops.square(mu_for_dot_prod))
sq_mean_wa = array_ops.reshape(sq_mean_wa, (-1,))
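  # Law of total variance: Var = E[var] + E[mean^2] - (E[mean])^2.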
mixture_variance = var_wa + sq_mean_wa - math_ops.square(mean_wa)
return math_ops.sqrt(mixture_variance)
def make_tril_scale(
loc=None,
scale_tril=None,
scale_diag=None,
scale_identity_multiplier=None,
shape_hint=None,
validate_args=False,
assert_positive=False,
name=None):
"""Creates a LinOp representing a lower triangular matrix.
Args:
loc: Floating-point `Tensor`. This is used for inferring shape in the case
where only `scale_identity_multiplier` is set.
    scale_tril: Floating-point `Tensor` representing the lower triangular
      matrix. `scale_tril` has shape [N1, N2, ... k, k], which represents a
      k x k lower triangular matrix.
When `None` no `scale_tril` term is added to the LinOp.
The upper triangular elements above the diagonal are ignored.
scale_diag: Floating-point `Tensor` representing the diagonal matrix.
`scale_diag` has shape [N1, N2, ... k], which represents a k x k
diagonal matrix.
When `None` no diagonal term is added to the LinOp.
scale_identity_multiplier: floating point rank 0 `Tensor` representing a
scaling done to the identity matrix.
When `scale_identity_multiplier = scale_diag = scale_tril = None` then
`scale += IdentityMatrix`. Otherwise no scaled-identity-matrix is added
to `scale`.
shape_hint: scalar integer `Tensor` representing a hint at the dimension of
the identity matrix when only `scale_identity_multiplier` is set.
validate_args: Python `bool` indicating whether arguments should be
checked for correctness.
assert_positive: Python `bool` indicating whether LinOp should be checked
for being positive definite.
name: Python `str` name given to ops managed by this object.
Returns:
`LinearOperator` representing a lower triangular matrix.
Raises:
ValueError: If only `scale_identity_multiplier` is set and `loc` and
`shape_hint` are both None.
"""
def _maybe_attach_assertion(x):
if not validate_args:
return x
if assert_positive:
return control_flow_ops.with_dependencies([
check_ops.assert_positive(
array_ops.matrix_diag_part(x),
message="diagonal part must be positive"),
], x)
return control_flow_ops.with_dependencies([
check_ops.assert_none_equal(
array_ops.matrix_diag_part(x),
array_ops.zeros([], x.dtype),
message="diagonal part must be non-zero"),
], x)
with ops.name_scope(name, "make_tril_scale",
values=[loc, scale_diag, scale_identity_multiplier]):
loc = _convert_to_tensor(loc, name="loc")
scale_tril = _convert_to_tensor(scale_tril, name="scale_tril")
scale_diag = _convert_to_tensor(scale_diag, name="scale_diag")
scale_identity_multiplier = _convert_to_tensor(
scale_identity_multiplier,
name="scale_identity_multiplier")
if scale_tril is not None:
scale_tril = array_ops.matrix_band_part(scale_tril, -1, 0) # Zero out TriU.
tril_diag = array_ops.matrix_diag_part(scale_tril)
if scale_diag is not None:
tril_diag += scale_diag
if scale_identity_multiplier is not None:
tril_diag += scale_identity_multiplier[..., array_ops.newaxis]
scale_tril = array_ops.matrix_set_diag(scale_tril, tril_diag)
return linalg.LinearOperatorLowerTriangular(
tril=_maybe_attach_assertion(scale_tril),
is_non_singular=True,
is_self_adjoint=False,
is_positive_definite=assert_positive)
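    # No scale_tril given; delegate to the diagonal/identity construction.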
return make_diag_scale(
loc=loc,
scale_diag=scale_diag,
scale_identity_multiplier=scale_identity_multiplier,
shape_hint=shape_hint,
validate_args=validate_args,
assert_positive=assert_positive,
name=name)
def make_diag_scale(
loc=None,
scale_diag=None,
scale_identity_multiplier=None,
shape_hint=None,
validate_args=False,
assert_positive=False,
name=None):
"""Creates a LinOp representing a diagonal matrix.
Args:
loc: Floating-point `Tensor`. This is used for inferring shape in the case
where only `scale_identity_multiplier` is set.
scale_diag: Floating-point `Tensor` representing the diagonal matrix.
`scale_diag` has shape [N1, N2, ... k], which represents a k x k
diagonal matrix.
When `None` no diagonal term is added to the LinOp.
scale_identity_multiplier: floating point rank 0 `Tensor` representing a
scaling done to the identity matrix.
When `scale_identity_multiplier = scale_diag = scale_tril = None` then
`scale += IdentityMatrix`. Otherwise no scaled-identity-matrix is added
to `scale`.
shape_hint: scalar integer `Tensor` representing a hint at the dimension of
the identity matrix when only `scale_identity_multiplier` is set.
validate_args: Python `bool` indicating whether arguments should be
checked for correctness.
assert_positive: Python `bool` indicating whether LinOp should be checked
for being positive definite.
name: Python `str` name given to ops managed by this object.
Returns:
    `LinearOperator` representing a diagonal matrix.
Raises:
ValueError: If only `scale_identity_multiplier` is set and `loc` and
`shape_hint` are both None.
"""
def _maybe_attach_assertion(x):
if not validate_args:
return x
if assert_positive:
return control_flow_ops.with_dependencies([
check_ops.assert_positive(
x, message="diagonal part must be positive"),
], x)
return control_flow_ops.with_dependencies([
check_ops.assert_none_equal(
x,
array_ops.zeros([], x.dtype),
message="diagonal part must be non-zero")], x)
with ops.name_scope(name, "make_diag_scale",
values=[loc, scale_diag, scale_identity_multiplier]):
loc = _convert_to_tensor(loc, name="loc")
scale_diag = _convert_to_tensor(scale_diag, name="scale_diag")
scale_identity_multiplier = _convert_to_tensor(
scale_identity_multiplier,
name="scale_identity_multiplier")
if scale_diag is not None:
if scale_identity_multiplier is not None:
scale_diag += scale_identity_multiplier[..., array_ops.newaxis]
return linalg.LinearOperatorDiag(
diag=_maybe_attach_assertion(scale_diag),
is_non_singular=True,
is_self_adjoint=True,
is_positive_definite=assert_positive)
if loc is None and shape_hint is None:
raise ValueError(
"Cannot infer `event_shape` unless `loc` or "
"`shape_hint` is specified.")
if shape_hint is None:
shape_hint = loc.shape[-1]
if scale_identity_multiplier is None:
return linalg.LinearOperatorIdentity(
num_rows=shape_hint,
dtype=loc.dtype.base_dtype,
is_self_adjoint=True,
is_positive_definite=True,
assert_proper_shapes=validate_args)
return linalg.LinearOperatorScaledIdentity(
num_rows=shape_hint,
multiplier=_maybe_attach_assertion(scale_identity_multiplier),
is_non_singular=True,
is_self_adjoint=True,
is_positive_definite=assert_positive,
assert_proper_shapes=validate_args)
def shapes_from_loc_and_scale(loc, scale, name="shapes_from_loc_and_scale"):
"""Infer distribution batch and event shapes from a location and scale.
Location and scale family distributions determine their batch/event shape by
broadcasting the `loc` and `scale` args. This helper does that broadcast,
statically if possible.
Batch shape broadcasts as per the normal rules.
We allow the `loc` event shape to broadcast up to that of `scale`. We do not
allow `scale`'s event shape to change. Therefore, the last dimension of `loc`
must either be size `1`, or the same as `scale.range_dimension`.
See `MultivariateNormalLinearOperator` for a usage example.
Args:
loc: `N-D` `Tensor` with `N >= 1` (already converted to tensor) or `None`.
If `None`, both batch and event shape are determined by `scale`.
scale: A `LinearOperator` instance.
name: A string name to prepend to created ops.
Returns:
batch_shape: `TensorShape` (if broadcast is done statically), or `Tensor`.
event_shape: `TensorShape` (if broadcast is done statically), or `Tensor`.
Raises:
ValueError: If the last dimension of `loc` is determined statically to be
different than the range of `scale`.
"""
with ops.name_scope(name, values=[loc] + scale.graph_parents):
# Get event shape.
event_size = scale.range_dimension_tensor()
event_size_const = tensor_util.constant_value(event_size)
if event_size_const is not None:
event_shape = event_size_const.reshape([1])
else:
event_shape = event_size[array_ops.newaxis]
# Static check that event shapes match.
if loc is not None:
loc_event_size = loc.get_shape()[-1].value
if loc_event_size is not None and event_size_const is not None:
if loc_event_size != 1 and loc_event_size != event_size_const:
        raise ValueError(
            "Event size of 'loc' (%d) could not be broadcast up to that of "
            "'scale' (%d)." % (loc_event_size, event_size_const))
# Get batch shape.
batch_shape = scale.batch_shape_tensor()
if loc is None:
batch_shape_const = tensor_util.constant_value(batch_shape)
batch_shape = (
batch_shape_const if batch_shape_const is not None else batch_shape)
else:
loc_batch_shape = loc.get_shape().with_rank_at_least(1)[:-1]
if (loc.get_shape().ndims is None or
not loc_batch_shape.is_fully_defined()):
loc_batch_shape = array_ops.shape(loc)[:-1]
else:
loc_batch_shape = ops.convert_to_tensor(loc_batch_shape,
name="loc_batch_shape")
# This is defined in the core util module.
# pylint: disable=undefined-variable
batch_shape = prefer_static_broadcast_shape(batch_shape, loc_batch_shape)
# pylint: enable=undefined-variable
return batch_shape, event_shape
def get_broadcast_shape(*tensors):
"""Get broadcast shape as a Python list of integers (preferred) or `Tensor`.
Args:
*tensors: One or more `Tensor` objects (already converted!).
Returns:
broadcast shape: Python list (if shapes determined statically), otherwise
an `int32` `Tensor`.
"""
# Try static.
s_shape = tensors[0].shape
for t in tensors[1:]:
s_shape = array_ops.broadcast_static_shape(s_shape, t.shape)
if s_shape.is_fully_defined():
return s_shape.as_list()
# Fallback on dynamic.
d_shape = array_ops.shape(tensors[0])
for t in tensors[1:]:
d_shape = array_ops.broadcast_dynamic_shape(d_shape, array_ops.shape(t))
return d_shape
def is_diagonal_scale(scale):
"""Returns `True` if `scale` is a `LinearOperator` that is known to be diag.
Args:
scale: `LinearOperator` instance.
Returns:
Python `bool`.
Raises:
TypeError: If `scale` is not a `LinearOperator`.
"""
if not isinstance(scale, linalg.LinearOperator):
raise TypeError("Expected argument 'scale' to be instance of LinearOperator"
". Found: %s" % scale)
return (isinstance(scale, linalg.LinearOperatorIdentity) or
isinstance(scale, linalg.LinearOperatorScaledIdentity) or
isinstance(scale, linalg.LinearOperatorDiag))
def maybe_check_scalar_distribution(
distribution, expected_base_dtype, validate_args):
"""Helper which checks validity of a scalar `distribution` init arg.
Valid here means:
* `distribution` has scalar batch and event shapes.
* `distribution` is `FULLY_REPARAMETERIZED`
* `distribution` has expected dtype.
Args:
distribution: `Distribution`-like object.
expected_base_dtype: `TensorFlow` `dtype`.
validate_args: Python `bool`. Whether to do additional checks:
(i) check that reparameterization_type is `FULLY_REPARAMETERIZED`.
(ii) add `tf.Assert` ops to the graph to enforce that distribution
is scalar in the event that this cannot be determined statically.
Returns:
List of `tf.Assert` ops to run to enforce validity checks that could not
be statically determined. Empty if `not validate_args`.
Raises:
ValueError: If validate_args and distribution is not FULLY_REPARAMETERIZED
ValueError: If distribution is statically determined to not have both
scalar batch and scalar event shapes.
"""
if distribution.dtype != expected_base_dtype:
raise TypeError("dtype mismatch; "
"distribution.dtype=\"{}\" is not \"{}\"".format(
distribution.dtype.name, expected_base_dtype.name))
# Although `reparameterization_type` is a static property, we guard it by
# `validate_args`. This allows users to use a `distribution` which is not
# reparameterized itself. However, we tacitly assume that although the
# distribution is not reparameterized, it only depends on non-trainable
# variables.
if validate_args and (distribution.reparameterization_type
!= distribution_lib.FULLY_REPARAMETERIZED):
raise ValueError("Base distribution should be reparameterized or be "
"a function of non-trainable variables; "
"distribution.reparameterization_type = \"{}\" "
"!= \"FULLY_REPARAMETERIZED\".".format(
distribution.reparameterization_type))
with ops.name_scope(name="check_distribution"):
assertions = []
def check_is_scalar(is_scalar, name):
is_scalar_ = static_value(is_scalar)
if is_scalar_ is not None:
if not is_scalar_:
raise ValueError("distribution must be scalar; "
"distribution.{}=False is not True".format(name))
elif validate_args:
assertions.append(check_ops.assert_equal(
is_scalar, True,
message=("distribution must be scalar; "
"distribution.{}=False is not True".format(name))))
check_is_scalar(distribution.is_scalar_event(), "is_scalar_event")
check_is_scalar(distribution.is_scalar_batch(), "is_scalar_batch")
return assertions
def pad_mixture_dimensions(x, mixture_distribution, categorical_distribution,
event_ndims):
"""Pad dimensions of event tensors for mixture distributions.
See `Mixture._sample_n` and `MixtureSameFamily._sample_n` for usage examples.
Args:
x: event tensor to pad.
mixture_distribution: Base distribution of the mixture.
categorical_distribution: `Categorical` distribution that mixes the base
distribution.
event_ndims: Integer specifying the number of event dimensions in the event
tensor.
Returns:
A padded version of `x` that can broadcast with `categorical_distribution`.
"""
with ops.name_scope("pad_mix_dims", values=[x]):
def _get_ndims(d):
if d.batch_shape.ndims is not None:
return d.batch_shape.ndims
return array_ops.shape(d.batch_shape_tensor())[0]
dist_batch_ndims = _get_ndims(mixture_distribution)
cat_batch_ndims = _get_ndims(categorical_distribution)
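    # Scalar categorical batch: pad by the full mixture batch rank; otherwise
    # pad only the rank difference between the two batch shapes.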
pad_ndims = array_ops.where(
categorical_distribution.is_scalar_batch(),
dist_batch_ndims,
dist_batch_ndims - cat_batch_ndims)
s = array_ops.shape(x)
x = array_ops.reshape(x, shape=array_ops.concat([
s[:-1],
array_ops.ones([pad_ndims], dtype=dtypes.int32),
s[-1:],
array_ops.ones([event_ndims], dtype=dtypes.int32),
], axis=0))
return x
def static_value(x):
"""Returns the static value of a `Tensor` or `None`."""
return tensor_util.constant_value(ops.convert_to_tensor(x))
| apache-2.0 |
smerdis/pycortex | cortex/dataset/viewRGB.py | 2 | 7274 | import json
import numpy as np
from .views import Dataview, Volume, Vertex
from .braindata import VolumeData, VertexData, _hash
from ..database import db
from .. import options
default_cmap = options.config.get("basic", "default_cmap")
class DataviewRGB(Dataview):
def __init__(self, subject=None, alpha=None, description="", state=None, **kwargs):
self.alpha = alpha
self.subject = self.red.subject
self.movie = self.red.movie
self.description = description
self.state = state
self.attrs = kwargs
if 'priority' not in self.attrs:
self.attrs['priority'] = 1
# If movie, make sure each channel has the same number of time points
if self.red.movie:
if not self.red.data.shape[0] == self.green.data.shape[0] == self.blue.data.shape[0]:
raise ValueError("For movie data, all three channels have to be the same length")
def uniques(self, collapse=False):
if collapse:
yield self
else:
yield self.red
yield self.green
yield self.blue
if self.alpha is not None:
yield self.alpha
def _write_hdf(self, h5, name="data", xfmname=None):
self._cls._write_hdf(self.red, h5)
self._cls._write_hdf(self.green, h5)
self._cls._write_hdf(self.blue, h5)
alpha = None
if self.alpha is not None:
self._cls._write_hdf(self.alpha, h5)
alpha = self.alpha.name
data = [self.red.name, self.green.name, self.blue.name, alpha]
viewnode = Dataview._write_hdf(self, h5, name=name,
data=[data], xfmname=xfmname)
return viewnode
def to_json(self, simple=False):
sdict = super(DataviewRGB, self).to_json(simple=simple)
if simple:
sdict['name'] = self.name
sdict['subject'] = self.subject
sdict['min'] = 0
sdict['max'] = 255
sdict['shape'] = self.red.shape
else:
sdict['data'] = [self.name]
sdict['cmap'] = [default_cmap]
sdict['vmin'] = [0]
sdict['vmax'] = [255]
return sdict
class VolumeRGB(DataviewRGB):
_cls = VolumeData
def __init__(self, red, green, blue, subject=None, xfmname=None, alpha=None, description="",
state=None, **kwargs):
if isinstance(red, VolumeData):
if not isinstance(green, VolumeData) or red.subject != green.subject:
raise TypeError("Invalid data for green channel")
if not isinstance(blue, VolumeData) or red.subject != blue.subject:
raise TypeError("Invalid data for blue channel")
self.red = red
self.green = green
self.blue = blue
else:
if subject is None or xfmname is None:
raise TypeError("Subject and xfmname are required")
self.red = Volume(red, subject, xfmname)
self.green = Volume(green, subject, xfmname)
self.blue = Volume(blue, subject, xfmname)
if alpha is None:
alpha = np.ones(self.red.volume.shape)
if not isinstance(alpha, Volume):
alpha = Volume(alpha, self.red.subject, self.red.xfmname)
self.alpha = alpha
if self.red.xfmname == self.green.xfmname == self.blue.xfmname == self.alpha.xfmname:
self.xfmname = self.red.xfmname
else:
raise ValueError('Cannot handle different transforms per volume')
super(VolumeRGB, self).__init__(subject, alpha, description=description, state=state, **kwargs)
def to_json(self, simple=False):
sdict = super(VolumeRGB, self).to_json(simple=simple)
if not simple:
sdict['xfm'] = [list(np.array(db.get_xfm(self.subject, self.xfmname, 'coord').xfm).ravel())]
return sdict
@property
def volume(self):
volume = []
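        # Scale each channel to uint8 [0, 255]: shift/scale by vmin/vmax when
        # given, otherwise by the data's own range.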
for dv in (self.red, self.green, self.blue, self.alpha):
vol = dv.volume.copy()
if vol.dtype != np.uint8:
if dv.vmin is None:
if vol.min() < 0:
vol -= vol.min()
else:
vol -= dv.vmin
if dv.vmax is None:
if vol.max() > 1:
vol /= vol.max()
else:
vol /= dv.vmax - dv.vmin
vol = (np.clip(vol, 0, 1) * 255).astype(np.uint8)
volume.append(vol)
return np.array(volume).transpose([1, 2, 3, 4, 0])
def __repr__(self):
return "<RGB volumetric data for (%s, %s)>"%(self.red.subject, self.red.xfmname)
def __hash__(self):
return hash(_hash(self.volume))
@property
def name(self):
return "__%s"%_hash(self.volume)[:16]
def _write_hdf(self, h5, name="data"):
return super(VolumeRGB, self)._write_hdf(h5, name=name, xfmname=[self.xfmname])
class VertexRGB(DataviewRGB):
_cls = VertexData
def __init__(self, red, green, blue, subject=None, alpha=None, description="",
state=None, **kwargs):
if isinstance(red, VertexData):
if not isinstance(green, VertexData) or red.subject != green.subject:
raise TypeError("Invalid data for green channel")
if not isinstance(blue, VertexData) or red.subject != blue.subject:
raise TypeError("Invalid data for blue channel")
self.red = red
self.green = green
self.blue = blue
else:
if subject is None:
raise TypeError("Subject name is required")
self.red = Vertex(red, subject)
self.green = Vertex(green, subject)
self.blue = Vertex(blue, subject)
super(VertexRGB, self).__init__(subject, alpha, description=description,
state=state, **kwargs)
@property
def vertices(self):
alpha = self.alpha
if alpha is None:
alpha = np.ones_like(self.red.data)
if not isinstance(alpha, Vertex):
alpha = Vertex(alpha, self.subject)
verts = []
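        # Same per-channel uint8 scaling as VolumeRGB.volume.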
for dv in (self.red, self.green, self.blue, alpha):
vert = dv.vertices.copy()
if vert.dtype != np.uint8:
if dv.vmin is None:
if vert.min() < 0:
vert -= vert.min()
else:
vert -= dv.vmin
if dv.vmax is None:
if vert.max() > 1:
vert /= vert.max()
else:
vert /= dv.vmax - dv.vmin
vert = (np.clip(vert, 0, 1) * 255).astype(np.uint8)
verts.append(vert)
return np.array(verts).transpose([1, 2, 0])
@property
def left(self):
return self.vertices[:,:self.red.llen]
@property
def right(self):
return self.vertices[:,self.red.llen:]
def __repr__(self):
return "<RGB vertex data for (%s)>"%(self.subject)
def __hash__(self):
return hash(_hash(self.vertices))
@property
def name(self):
return "__%s"%_hash(self.vertices)[:16]
| bsd-2-clause |
andrewyoung1991/scons | test/Fortran/FORTRANFLAGS.py | 4 | 4799 | #!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import TestSCons
from common import write_fake_link
_python_ = TestSCons._python_
test = TestSCons.TestSCons()
_exe = TestSCons._exe
write_fake_link(test)
test.write('myfortran.py', r"""
import getopt
import sys
opts, args = getopt.getopt(sys.argv[1:], 'co:x')
optstring = ''
for opt, arg in opts:
if opt == '-o': out = arg
else: optstring = optstring + ' ' + opt
infile = open(args[0], 'rb')
outfile = open(out, 'wb')
outfile.write(optstring + "\n")
for l in infile.readlines():
if l[:8] != '#fortran':
outfile.write(l)
sys.exit(0)
""")
test.write('SConstruct', """
env = Environment(LINK = r'%(_python_)s mylink.py',
LINKFLAGS = [],
FORTRAN = r'%(_python_)s myfortran.py',
FORTRANFLAGS = '-x')
env.Program(target = 'test01', source = 'test01.f')
env.Program(target = 'test02', source = 'test02.F')
env.Program(target = 'test03', source = 'test03.for')
env.Program(target = 'test04', source = 'test04.FOR')
env.Program(target = 'test05', source = 'test05.ftn')
env.Program(target = 'test06', source = 'test06.FTN')
env.Program(target = 'test07', source = 'test07.fpp')
env.Program(target = 'test08', source = 'test08.FPP')
""" % locals())
test.write('test01.f', "This is a .f file.\n#link\n#fortran\n")
test.write('test02.F', "This is a .F file.\n#link\n#fortran\n")
test.write('test03.for', "This is a .for file.\n#link\n#fortran\n")
test.write('test04.FOR', "This is a .FOR file.\n#link\n#fortran\n")
test.write('test05.ftn', "This is a .ftn file.\n#link\n#fortran\n")
test.write('test06.FTN', "This is a .FTN file.\n#link\n#fortran\n")
test.write('test07.fpp', "This is a .fpp file.\n#link\n#fortran\n")
test.write('test08.FPP', "This is a .FPP file.\n#link\n#fortran\n")
test.run(arguments = '.', stderr = None)
test.must_match('test01' + _exe, " -c -x\nThis is a .f file.\n")
test.must_match('test02' + _exe, " -c -x\nThis is a .F file.\n")
test.must_match('test03' + _exe, " -c -x\nThis is a .for file.\n")
test.must_match('test04' + _exe, " -c -x\nThis is a .FOR file.\n")
test.must_match('test05' + _exe, " -c -x\nThis is a .ftn file.\n")
test.must_match('test06' + _exe, " -c -x\nThis is a .FTN file.\n")
test.must_match('test07' + _exe, " -c -x\nThis is a .fpp file.\n")
test.must_match('test08' + _exe, " -c -x\nThis is a .FPP file.\n")
fc = 'f77'
g77 = test.detect_tool(fc)
if g77:
test.write("wrapper.py",
"""import os
import sys
open('%s', 'wb').write("wrapper.py\\n")
os.system(" ".join(sys.argv[1:]))
""" % test.workpath('wrapper.out').replace('\\', '\\\\'))
test.write('SConstruct', """
foo = Environment(FORTRAN = '%(fc)s')
f77 = foo.Dictionary('FORTRAN')
bar = foo.Clone(FORTRAN = r'%(_python_)s wrapper.py ' + f77, FORTRANFLAGS = '-Ix')
foo.Program(target = 'foo', source = 'foo.f')
bar.Program(target = 'bar', source = 'bar.f')
""" % locals())
test.write('foo.f', r"""
PROGRAM FOO
PRINT *,'foo.f'
STOP
END
""")
test.write('bar.f', r"""
PROGRAM BAR
PRINT *,'bar.f'
STOP
END
""")
test.run(arguments = 'foo' + _exe, stderr = None)
test.run(program = test.workpath('foo'), stdout = " foo.f\n")
test.must_not_exist('wrapper.out')
import sys
if sys.platform[:5] == 'sunos':
test.run(arguments = 'bar' + _exe, stderr = None)
else:
test.run(arguments = 'bar' + _exe)
test.run(program = test.workpath('bar'), stdout = " bar.f\n")
test.must_match('wrapper.out', "wrapper.py\n")
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| mit |
cloudera/hue | desktop/core/ext-py/sqlparse-0.2.0/tests/test_parse.py | 6 | 11507 | # -*- coding: utf-8 -*-
"""Tests sqlparse.parse()."""
import pytest
import sqlparse
from sqlparse import sql, tokens as T
from sqlparse.compat import StringIO
def test_parse_tokenize():
s = 'select * from foo;'
stmts = sqlparse.parse(s)
assert len(stmts) == 1
assert str(stmts[0]) == s
def test_parse_multistatement():
sql1 = 'select * from foo;'
sql2 = 'select * from bar;'
stmts = sqlparse.parse(sql1 + sql2)
assert len(stmts) == 2
assert str(stmts[0]) == sql1
assert str(stmts[1]) == sql2
@pytest.mark.parametrize('s', ['select\n*from foo;',
'select\r\n*from foo',
'select\r*from foo',
'select\r\n*from foo\n'])
def test_parse_newlines(s):
p = sqlparse.parse(s)[0]
assert str(p) == s
def test_parse_within():
s = 'foo(col1, col2)'
p = sqlparse.parse(s)[0]
col1 = p.tokens[0].tokens[1].tokens[1].tokens[0]
assert col1.within(sql.Function)
def test_parse_child_of():
s = '(col1, col2)'
p = sqlparse.parse(s)[0]
assert p.tokens[0].tokens[1].is_child_of(p.tokens[0])
s = 'select foo'
p = sqlparse.parse(s)[0]
assert not p.tokens[2].is_child_of(p.tokens[0])
assert p.tokens[2].is_child_of(p)
def test_parse_has_ancestor():
s = 'foo or (bar, baz)'
p = sqlparse.parse(s)[0]
baz = p.tokens[-1].tokens[1].tokens[-1]
assert baz.has_ancestor(p.tokens[-1].tokens[1])
assert baz.has_ancestor(p.tokens[-1])
assert baz.has_ancestor(p)
@pytest.mark.parametrize('s', ['.5', '.51', '1.5', '12.5'])
def test_parse_float(s):
t = sqlparse.parse(s)[0].tokens
assert len(t) == 1
assert t[0].ttype is sqlparse.tokens.Number.Float
@pytest.mark.parametrize('s, holder', [
('select * from foo where user = ?', '?'),
('select * from foo where user = :1', ':1'),
('select * from foo where user = :name', ':name'),
('select * from foo where user = %s', '%s'),
('select * from foo where user = $a', '$a')])
def test_parse_placeholder(s, holder):
t = sqlparse.parse(s)[0].tokens[-1].tokens
assert t[-1].ttype is sqlparse.tokens.Name.Placeholder
assert t[-1].value == holder
def test_parse_modulo_not_placeholder():
tokens = list(sqlparse.lexer.tokenize('x %3'))
assert tokens[2][0] == sqlparse.tokens.Operator
def test_parse_access_symbol():
# see issue27
t = sqlparse.parse('select a.[foo bar] as foo')[0].tokens
assert isinstance(t[-1], sql.Identifier)
assert t[-1].get_name() == 'foo'
assert t[-1].get_real_name() == '[foo bar]'
assert t[-1].get_parent_name() == 'a'
def test_parse_square_brackets_notation_isnt_too_greedy():
# see issue153
t = sqlparse.parse('[foo], [bar]')[0].tokens
assert isinstance(t[0], sql.IdentifierList)
assert len(t[0].tokens) == 4
assert t[0].tokens[0].get_real_name() == '[foo]'
assert t[0].tokens[-1].get_real_name() == '[bar]'
def test_parse_keyword_like_identifier():
# see issue47
t = sqlparse.parse('foo.key')[0].tokens
assert len(t) == 1
assert isinstance(t[0], sql.Identifier)
def test_parse_function_parameter():
# see issue94
t = sqlparse.parse('abs(some_col)')[0].tokens[0].get_parameters()
assert len(t) == 1
assert isinstance(t[0], sql.Identifier)
def test_parse_function_param_single_literal():
t = sqlparse.parse('foo(5)')[0].tokens[0].get_parameters()
assert len(t) == 1
assert t[0].ttype is T.Number.Integer
def test_parse_nested_function():
t = sqlparse.parse('foo(bar(5))')[0].tokens[0].get_parameters()
assert len(t) == 1
assert type(t[0]) is sql.Function
def test_quoted_identifier():
t = sqlparse.parse('select x.y as "z" from foo')[0].tokens
assert isinstance(t[2], sql.Identifier)
assert t[2].get_name() == 'z'
assert t[2].get_real_name() == 'y'
@pytest.mark.parametrize('name', ['foo', '_foo'])
def test_valid_identifier_names(name):
# issue175
t = sqlparse.parse(name)[0].tokens
assert isinstance(t[0], sql.Identifier)
def test_psql_quotation_marks():
# issue83
# regression: make sure plain $$ work
t = sqlparse.split("""
CREATE OR REPLACE FUNCTION testfunc1(integer) RETURNS integer AS $$
....
$$ LANGUAGE plpgsql;
CREATE OR REPLACE FUNCTION testfunc2(integer) RETURNS integer AS $$
....
$$ LANGUAGE plpgsql;""")
assert len(t) == 2
# make sure $SOMETHING$ works too
t = sqlparse.split("""
CREATE OR REPLACE FUNCTION testfunc1(integer) RETURNS integer AS $PROC_1$
....
$PROC_1$ LANGUAGE plpgsql;
CREATE OR REPLACE FUNCTION testfunc2(integer) RETURNS integer AS $PROC_2$
....
$PROC_2$ LANGUAGE plpgsql;""")
assert len(t) == 2
def test_double_precision_is_builtin():
s = 'DOUBLE PRECISION'
t = sqlparse.parse(s)[0].tokens
assert len(t) == 1
assert t[0].ttype == sqlparse.tokens.Name.Builtin
assert t[0].value == 'DOUBLE PRECISION'
@pytest.mark.parametrize('ph', ['?', ':1', ':foo', '%s', '%(foo)s'])
def test_placeholder(ph):
p = sqlparse.parse(ph)[0].tokens
assert len(p) == 1
assert p[0].ttype is T.Name.Placeholder
@pytest.mark.parametrize('num', ['6.67428E-8', '1.988e33', '1e-12'])
def test_scientific_numbers(num):
p = sqlparse.parse(num)[0].tokens
assert len(p) == 1
assert p[0].ttype is T.Number.Float
def test_single_quotes_are_strings():
p = sqlparse.parse("'foo'")[0].tokens
assert len(p) == 1
assert p[0].ttype is T.String.Single
def test_double_quotes_are_identifiers():
p = sqlparse.parse('"foo"')[0].tokens
assert len(p) == 1
assert isinstance(p[0], sql.Identifier)
def test_single_quotes_with_linebreaks():
# issue118
p = sqlparse.parse("'f\nf'")[0].tokens
assert len(p) == 1
assert p[0].ttype is T.String.Single
def test_sqlite_identifiers():
# Make sure we still parse sqlite style escapes
p = sqlparse.parse('[col1],[col2]')[0].tokens
id_names = [id_.get_name() for id_ in p[0].get_identifiers()]
assert len(p) == 1
assert isinstance(p[0], sql.IdentifierList)
assert id_names == ['[col1]', '[col2]']
p = sqlparse.parse('[col1]+[col2]')[0]
types = [tok.ttype for tok in p.flatten()]
assert types == [T.Name, T.Operator, T.Name]
def test_simple_1d_array_index():
p = sqlparse.parse('col[1]')[0].tokens
assert len(p) == 1
assert p[0].get_name() == 'col'
indices = list(p[0].get_array_indices())
assert len(indices) == 1 # 1-dimensional index
assert len(indices[0]) == 1 # index is single token
assert indices[0][0].value == '1'
def test_2d_array_index():
p = sqlparse.parse('col[x][(y+1)*2]')[0].tokens
assert len(p) == 1
assert p[0].get_name() == 'col'
assert len(list(p[0].get_array_indices())) == 2 # 2-dimensional index
def test_array_index_function_result():
p = sqlparse.parse('somefunc()[1]')[0].tokens
assert len(p) == 1
assert len(list(p[0].get_array_indices())) == 1
def test_schema_qualified_array_index():
p = sqlparse.parse('schem.col[1]')[0].tokens
assert len(p) == 1
assert p[0].get_parent_name() == 'schem'
assert p[0].get_name() == 'col'
assert list(p[0].get_array_indices())[0][0].value == '1'
def test_aliased_array_index():
p = sqlparse.parse('col[1] x')[0].tokens
assert len(p) == 1
assert p[0].get_alias() == 'x'
assert p[0].get_real_name() == 'col'
assert list(p[0].get_array_indices())[0][0].value == '1'
def test_array_literal():
# See issue #176
p = sqlparse.parse('ARRAY[%s, %s]')[0]
assert len(p.tokens) == 2
assert len(list(p.flatten())) == 7
def test_typed_array_definition():
# array indices aren't grouped with builtins, but make sure we can extract
    # identifier names
p = sqlparse.parse('x int, y int[], z int')[0]
names = [x.get_name() for x in p.get_sublists()
if isinstance(x, sql.Identifier)]
assert names == ['x', 'y', 'z']
@pytest.mark.parametrize('s', ['select 1 -- foo', 'select 1 # foo'])
def test_single_line_comments(s):
# see issue178
p = sqlparse.parse(s)[0]
assert len(p.tokens) == 5
assert p.tokens[-1].ttype == T.Comment.Single
@pytest.mark.parametrize('s', ['foo', '@foo', '#foo', '##foo'])
def test_names_and_special_names(s):
# see issue192
p = sqlparse.parse(s)[0]
assert len(p.tokens) == 1
assert isinstance(p.tokens[0], sql.Identifier)
def test_get_token_at_offset():
p = sqlparse.parse('select * from dual')[0]
# 0123456789
assert p.get_token_at_offset(0) == p.tokens[0]
assert p.get_token_at_offset(1) == p.tokens[0]
assert p.get_token_at_offset(6) == p.tokens[1]
assert p.get_token_at_offset(7) == p.tokens[2]
assert p.get_token_at_offset(8) == p.tokens[3]
assert p.get_token_at_offset(9) == p.tokens[4]
assert p.get_token_at_offset(10) == p.tokens[4]
def test_pprint():
p = sqlparse.parse('select a0, b0, c0, d0, e0 from '
'(select * from dual) q0 where 1=1 and 2=2')[0]
output = StringIO()
p._pprint_tree(f=output)
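    # Expected tree dump: one line per token, '|' marks nesting depth.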
pprint = '\n'.join([
" 0 DML 'select'",
" 1 Whitespace ' '",
" 2 IdentifierList 'a0, b0...'",
" | 0 Identifier 'a0'",
" | | 0 Name 'a0'",
" | 1 Punctuation ','",
" | 2 Whitespace ' '",
" | 3 Identifier 'b0'",
" | | 0 Name 'b0'",
" | 4 Punctuation ','",
" | 5 Whitespace ' '",
" | 6 Identifier 'c0'",
" | | 0 Name 'c0'",
" | 7 Punctuation ','",
" | 8 Whitespace ' '",
" | 9 Identifier 'd0'",
" | | 0 Name 'd0'",
" | 10 Punctuation ','",
" | 11 Whitespace ' '",
" | 12 Float 'e0'",
" 3 Whitespace ' '",
" 4 Keyword 'from'",
" 5 Whitespace ' '",
" 6 Identifier '(selec...'",
" | 0 Parenthesis '(selec...'",
" | | 0 Punctuation '('",
" | | 1 DML 'select'",
" | | 2 Whitespace ' '",
" | | 3 Wildcard '*'",
" | | 4 Whitespace ' '",
" | | 5 Keyword 'from'",
" | | 6 Whitespace ' '",
" | | 7 Identifier 'dual'",
" | | | 0 Name 'dual'",
" | | 8 Punctuation ')'",
" | 1 Whitespace ' '",
" | 2 Identifier 'q0'",
" | | 0 Name 'q0'",
" 7 Whitespace ' '",
" 8 Where 'where ...'",
" | 0 Keyword 'where'",
" | 1 Whitespace ' '",
" | 2 Comparison '1=1'",
" | | 0 Integer '1'",
" | | 1 Comparison '='",
" | | 2 Integer '1'",
" | 3 Whitespace ' '",
" | 4 Keyword 'and'",
" | 5 Whitespace ' '",
" | 6 Comparison '2=2'",
" | | 0 Integer '2'",
" | | 1 Comparison '='",
" | | 2 Integer '2'",
""])
assert output.getvalue() == pprint
def test_wildcard_multiplication():
p = sqlparse.parse('select * from dual')[0]
assert p.tokens[2].ttype == T.Wildcard
p = sqlparse.parse('select a0.* from dual a0')[0]
assert p.tokens[2][2].ttype == T.Wildcard
p = sqlparse.parse('select 1 * 2 from dual')[0]
assert p.tokens[2][2].ttype == T.Operator
def test_stmt_tokens_parents():
# see issue 226
s = "CREATE TABLE test();"
stmt = sqlparse.parse(s)[0]
for token in stmt.tokens:
assert token.has_ancestor(stmt)
| apache-2.0 |
weijia/django-excel-to-model | django_excel_to_model/management/commands/import_excel_according_to_model.py | 1 | 7251 | import argparse
import importlib
import os
from django.contrib.contenttypes.models import ContentType
from django.core.management.base import BaseCommand
from django_excel_to_model.field_tools import get_valid_excel_field_name
from django_excel_to_model.file_readers.csv_reader import CsvFile
from django_excel_to_model.file_readers.data_source_factory import DataSourceFactory
from django_excel_to_model.management.commands.utils.bulk_inserter import BulkInserter
from django_excel_to_model.management.commands.utils.counter import Counter
from django_excel_to_model.models import ExcelImportTask
from django_excel_to_model.reader import ExcelFile, XlsbFile
try:
from pinax.eventlog.models import log
except ImportError:
log = None
class MandatoryColumnMissing(Exception):
pass
class DictTranslator(object):
# noinspection PyMethodMayBeStatic
def translate(self, item_dict):
for ignored in ["__invalid"]:
if ignored in item_dict:
del item_dict["__invalid"]
return item_dict
class ExcelFileFromClassImporter(object):
def __init__(self, class_instance, sheet_numbered_from_1=1):
super(ExcelFileFromClassImporter, self).__init__()
self.model_module = importlib.import_module(class_instance.__module__)
self.class_instance = class_instance
self.translator = DictTranslator()
self.sheet_numbered_from_1 = sheet_numbered_from_1
self.inserter = BulkInserter(self.class_instance)
self.mandatory_column_headers = None
self.is_clean_before_import = False
def set_clean_before_import(self):
self.is_clean_before_import = True
def import_excel(self, full_path, header_row_numbered_from_1=1, first_import_row_numbered_from_1=2, count=1000):
filename = os.path.basename(full_path)
data_source = DataSourceFactory(full_path).get_data_source(
sheet_index_numbered_from_0=self.sheet_numbered_from_1 - 1,
header_row_start_from_0=header_row_numbered_from_1 - 1)
count = min(data_source.get_total_rows(), count)
c = Counter(count)
self.validate_existence_of_mandatory_columns(data_source)
if self.is_clean_before_import:
self.class_instance.objects.all().delete()
column_to_db_field_mapping = self._get_column_to_db_field_mapping(data_source)
for item_info_dict in data_source.enumerate_mapped(column_to_db_field_mapping,
start_row_numbered_from_0=first_import_row_numbered_from_1 - 1):
# print item_info_dict
self.translator.translate(item_info_dict)
item_info_dict["data_import_id"] = filename
self.inserter.insert(item_info_dict)
            # e.g. with count == 1, the counter reaches 0 after one processed row
c.decrease()
if c.is_equal_or_below(0):
self.commit_and_log(filename)
return 0
self.commit_and_log(filename)
return -1
def _get_column_to_db_field_mapping(self, data_source):
column_to_db_field_mapping = {}
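        # Map each spreadsheet header to a model field, falling back to a
        # sanitized version of the header name.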
for column_name in data_source.get_headers():
if column_name in self.model_module.mapping:
column_to_db_field_mapping[column_name] = self.model_module.mapping[column_name]
else:
converted_field_name = get_valid_excel_field_name(column_name)
if converted_field_name in self.model_module.mapping:
column_to_db_field_mapping[column_name] = \
self.model_module.mapping[converted_field_name]
return column_to_db_field_mapping
def validate_existence_of_mandatory_columns(self, sheet):
if self.mandatory_column_headers is not None:
if not all(spreadsheet_column_header in sheet.get_headers()
for spreadsheet_column_header in self.mandatory_column_headers):
raise MandatoryColumnMissing()
def commit_and_log(self, filename):
self.inserter.commit()
if log is not None:
log(
user=None,
action=filename,
extra={
"filename": filename
},
obj=ContentType.objects.get_for_model(ExcelImportTask)
)
def import_excel_according_to_model(full_path, content_type_id, header_row_numbered_from_1,
first_import_row_numbered_from_1, count=1000):
content = ContentType.objects.get(pk=content_type_id)
e = ExcelFileFromClassImporter(content.model_class())
e.import_excel(full_path, header_row_numbered_from_1, first_import_row_numbered_from_1, count)
class Command(BaseCommand):
help = 'Import excel according to model info'
def add_arguments(self, parser):
parser.add_argument('file-path')
parser.add_argument('content-type-id')
parser.add_argument('header_row_numbered_from_1')
parser.add_argument('start')
parser.add_argument('count', nargs='?')
def handle(self, *args, **options):
parser = argparse.ArgumentParser(description='Import excel file according to model info')
subparsers = parser.add_subparsers(help='sub-command help')
# create the parser for the "a" command
parser_import_excel_according_to_model = subparsers.add_parser(
'import_excel_according_to_model', help='import_excel_according_to_model help')
# parser_import_excel_according_to_model.add_argument(
# '--content-type-id', type=int, help='content type pk')
# parser_import_excel_according_to_model.add_argument(
# '--start', default=1,
# help='start line for importing excel, default=1 (second line)')
parser_import_excel_according_to_model.add_argument(
'file-path', nargs=1, help='path of the excel file')
parser_import_excel_according_to_model.add_argument(
'content-type-id', nargs=1, help='content id of the model', type=int)
parser_import_excel_according_to_model.add_argument(
'header_row_numbered_from_1', nargs=1, help='header row number (start from 1)', type=int)
parser_import_excel_according_to_model.add_argument(
'start', nargs=1, help='path of the excel file', type=int)
parser_import_excel_according_to_model.add_argument(
'count', nargs="?", help='process line count', default=[1000], type=int)
arg_result = parser.parse_args()
print(vars(arg_result)["file-path"][0])
print(vars(arg_result)["content-type-id"][0])
print(vars(arg_result)["start"][0])
print(vars(arg_result)["count"])
file_path = vars(arg_result)["file-path"][0]
content_type_id = vars(arg_result)["content-type-id"][0]
header_row_numbered_from_1 = vars(arg_result)["header_row_numbered_from_1"][0]
first_import_row_numbered_from_1 = vars(arg_result)["start"][0]
count = vars(arg_result)["count"]
return import_excel_according_to_model(
file_path, content_type_id, header_row_numbered_from_1, first_import_row_numbered_from_1, count)
| bsd-3-clause |
acsone/project | project_analytic_line_view/__openerp__.py | 16 | 1458 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Joël Grand-Guillaume, Matthieu Dietrich
# Copyright 2010-2015 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': "Project analytic account line view",
'version': '8.0.1.0.0',
'category': 'Generic Modules/Projects & Services',
'author': "Camptocamp,Odoo Community Association (OCA)",
'website': 'http://www.camptocamp.com',
'license': 'AGPL-3',
'depends': ['project',
'analytic',
],
'data': [
'analytic_account_view.xml',
],
'test': [],
'installable': True,
'auto_install': False,
'application': False
}
| agpl-3.0 |
veger/ansible | lib/ansible/modules/network/aci/aci_contract_subject.py | 2 | 10605 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: aci_contract_subject
short_description: Manage initial Contract Subjects (vz:Subj)
description:
- Manage initial Contract Subjects on Cisco ACI fabrics.
notes:
- The C(tenant) and C(contract) used must exist before using this module in your playbook.
The M(aci_tenant) and M(aci_contract) modules can be used for this.
seealso:
- module: aci_contract
- module: aci_tenant
- name: APIC Management Information Model reference
description: More information about the internal APIC class B(vz:Subj).
link: https://developer.cisco.com/docs/apic-mim-ref/
author:
- Swetha Chunduri (@schunduri)
version_added: '2.4'
options:
tenant:
description:
- The name of the tenant.
aliases: [ tenant_name ]
subject:
description:
- The contract subject name.
aliases: [ contract_subject, name, subject_name ]
contract:
description:
- The name of the Contract.
aliases: [ contract_name ]
reverse_filter:
description:
- Determines if the APIC should reverse the src and dst ports to allow the
      return traffic back, since ACI is a stateless filter.
- The APIC defaults to C(yes) when unset during creation.
type: bool
priority:
description:
- The QoS class.
- The APIC defaults to C(unspecified) when unset during creation.
choices: [ level1, level2, level3, unspecified ]
dscp:
description:
- The target DSCP.
- The APIC defaults to C(unspecified) when unset during creation.
choices: [ AF11, AF12, AF13, AF21, AF22, AF23, AF31, AF32, AF33, AF41, AF42, AF43,
CS0, CS1, CS2, CS3, CS4, CS5, CS6, CS7, EF, VA, unspecified ]
aliases: [ target ]
description:
description:
- Description for the contract subject.
aliases: [ descr ]
consumer_match:
description:
- The match criteria across consumers.
- The APIC defaults to C(at_least_one) when unset during creation.
choices: [ all, at_least_one, at_most_one, none ]
provider_match:
description:
- The match criteria across providers.
- The APIC defaults to C(at_least_one) when unset during creation.
choices: [ all, at_least_one, at_most_one, none ]
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
choices: [ absent, present, query ]
default: present
extends_documentation_fragment: aci
'''
EXAMPLES = r'''
- name: Add a new contract subject
aci_contract_subject:
host: apic
username: admin
password: SomeSecretPassword
tenant: production
contract: web_to_db
subject: default
description: test
reverse_filter: yes
priority: level1
dscp: unspecified
state: present
  delegate_to: localhost
- name: Remove a contract subject
aci_contract_subject:
host: apic
username: admin
password: SomeSecretPassword
tenant: production
contract: web_to_db
subject: default
state: absent
delegate_to: localhost
- name: Query a contract subject
aci_contract_subject:
host: apic
username: admin
password: SomeSecretPassword
tenant: production
contract: web_to_db
subject: default
state: query
delegate_to: localhost
register: query_result
- name: Query all contract subjects
aci_contract_subject:
host: apic
username: admin
password: SomeSecretPassword
state: query
delegate_to: localhost
register: query_result
'''
RETURN = r'''
current:
description: The existing configuration from the APIC after the module has finished
returned: success
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
error:
description: The error information as returned from the APIC
returned: failure
type: dict
sample:
{
"code": "122",
"text": "unknown managed object class foo"
}
raw:
description: The raw output returned by the APIC REST API (xml or json)
returned: parse error
type: string
sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>'
sent:
description: The actual/minimal configuration pushed to the APIC
returned: info
type: list
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment"
}
}
}
previous:
description: The original configuration from the APIC before the module has started
returned: info
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
proposed:
description: The assembled configuration from the user-provided parameters
returned: info
type: dict
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"name": "production"
}
}
}
filter_string:
description: The filter string used for the request
returned: failure or debug
type: string
sample: ?rsp-prop-include=config-only
method:
description: The HTTP method used for the request to the APIC
returned: failure or debug
type: string
sample: POST
response:
description: The HTTP response from the APIC
returned: failure or debug
type: string
sample: OK (30 bytes)
status:
description: The HTTP status from the APIC
returned: failure or debug
type: int
sample: 200
url:
description: The HTTP url used for the request to the APIC
returned: failure or debug
type: string
sample: https://10.11.12.13/api/mo/uni/tn-production.json
'''
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
from ansible.module_utils.basic import AnsibleModule
MATCH_MAPPING = dict(all='All', at_least_one='AtleastOne', at_most_one='AtmostOne', none='None')
def main():
argument_spec = aci_argument_spec()
argument_spec.update(
contract=dict(type='str', aliases=['contract_name']), # Not required for querying all objects
subject=dict(type='str', aliases=['contract_subject', 'name', 'subject_name']), # Not required for querying all objects
tenant=dict(type='str', aliases=['tenant_name']), # Not required for querying all objects
priority=dict(type='str', choices=['unspecified', 'level1', 'level2', 'level3']),
reverse_filter=dict(type='bool'),
dscp=dict(type='str', aliases=['target'],
choices=['AF11', 'AF12', 'AF13', 'AF21', 'AF22', 'AF23', 'AF31', 'AF32', 'AF33', 'AF41', 'AF42', 'AF43',
'CS0', 'CS1', 'CS2', 'CS3', 'CS4', 'CS5', 'CS6', 'CS7', 'EF', 'VA', 'unspecified']),
description=dict(type='str', aliases=['descr']),
consumer_match=dict(type='str', choices=['all', 'at_least_one', 'at_most_one', 'none']),
provider_match=dict(type='str', choices=['all', 'at_least_one', 'at_most_one', 'none']),
state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
directive=dict(type='str', removed_in_version='2.4'), # Deprecated starting from v2.4
filter=dict(type='str', aliases=['filter_name'], removed_in_version='2.4'), # Deprecated starting from v2.4
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=[
['state', 'absent', ['contract', 'subject', 'tenant']],
['state', 'present', ['contract', 'subject', 'tenant']],
],
)
aci = ACIModule(module)
subject = module.params['subject']
priority = module.params['priority']
reverse_filter = aci.boolean(module.params['reverse_filter'])
contract = module.params['contract']
dscp = module.params['dscp']
description = module.params['description']
filter_name = module.params['filter']
directive = module.params['directive']
consumer_match = module.params['consumer_match']
if consumer_match is not None:
consumer_match = MATCH_MAPPING[consumer_match]
provider_match = module.params['provider_match']
if provider_match is not None:
provider_match = MATCH_MAPPING[provider_match]
state = module.params['state']
tenant = module.params['tenant']
if directive is not None or filter_name is not None:
module.fail_json(msg="Managing Contract Subjects to Filter bindings has been moved to module 'aci_subject_bind_filter'")
aci.construct_url(
root_class=dict(
aci_class='fvTenant',
aci_rn='tn-{0}'.format(tenant),
module_object=tenant,
target_filter={'name': tenant},
),
subclass_1=dict(
aci_class='vzBrCP',
aci_rn='brc-{0}'.format(contract),
module_object=contract,
target_filter={'name': contract},
),
subclass_2=dict(
aci_class='vzSubj',
aci_rn='subj-{0}'.format(subject),
module_object=subject,
target_filter={'name': subject},
),
)
aci.get_existing()
if state == 'present':
aci.payload(
aci_class='vzSubj',
class_config=dict(
name=subject,
prio=priority,
revFltPorts=reverse_filter,
targetDscp=dscp,
consMatchT=consumer_match,
provMatchT=provider_match,
descr=description,
),
)
aci.get_diff(aci_class='vzSubj')
aci.post_config()
elif state == 'absent':
aci.delete_config()
aci.exit_json()
if __name__ == "__main__":
main()
| gpl-3.0 |
MiyamotoAkira/kivy | kivy/modules/keybinding.py | 81 | 1699 | '''Keybinding
==========
This module forces the mapping of some keys to functions:
* F11: Rotate the Window through 0, 90, 180 and 270 degrees
* Shift + F11: Switches between portrait and landscape on desktops
* F12: Take a screenshot
Note: this doesn't work if the application requests the keyboard beforehand.
Usage
-----
For normal module usage, please see the :mod:`~kivy.modules` documentation.
The Keybinding module, however, can also be imported and used just
like a normal python module. This has the added advantage of being
able to activate and deactivate the module programmatically::
from kivy.app import App
from kivy.uix.button import Button
from kivy.modules import keybinding
from kivy.core.window import Window
class Demo(App):
def build(self):
button = Button(text="Hello")
keybinding.start(Window, button)
return button
Demo().run()
To remove the keybinding, you can do the following::
    keybinding.stop(Window, button)
'''
from kivy.utils import platform
__all__ = ('start', 'stop')
def _on_keyboard_handler(instance, key, scancode, codepoint, modifiers):
if key == 293 and modifiers == []: # F12
instance.screenshot()
elif key == 292 and modifiers == []: # F11
instance.rotation += 90
elif key == 292 and modifiers == ['shift']: # Shift + F11
if platform in ('win', 'linux', 'macosx'):
instance.rotation = 0
w, h = instance.size
w, h = h, w
instance.size = (w, h)
def start(win, ctx):
win.bind(on_keyboard=_on_keyboard_handler)
def stop(win, ctx):
win.unbind(on_keyboard=_on_keyboard_handler)
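if __name__ == '__main__':
    # Minimal smoke demo (an assumption, not part of the original module):
    # mirrors the docstring example and requires a working display.
    from kivy.app import App
    from kivy.uix.button import Button
    from kivy.core.window import Window
    class _Demo(App):
        def build(self):
            button = Button(text='Hello')
            start(Window, button)
            return button
    _Demo().run()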
| mit |
miipl-naveen/optibizz | openerp/addons/base/res/__init__.py | 384 | 1261 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import res_country
import res_lang
import res_partner
import res_bank
import res_config
import res_currency
import res_font
import res_company
import res_users
import res_request
import ir_property
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
fedorpatlin/ansible | lib/ansible/modules/windows/win_robocopy.py | 72 | 4833 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Corwin Brown <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_robocopy
version_added: "2.2"
short_description: Synchronizes the contents of two directories using Robocopy.
description:
- Synchronizes the contents of two directories on the remote machine. Under the hood this just calls out to RoboCopy, since that should be available
      on most modern Windows systems.
options:
src:
description:
- Source file/directory to sync.
required: true
dest:
description:
- Destination file/directory to sync (Will receive contents of src).
required: true
recurse:
description:
- Includes all subdirectories (Toggles the `/e` flag to RoboCopy). If "flags" is set, this will be ignored.
choices:
- true
- false
default: false
required: false
purge:
description:
- Deletes any files/directories found in the destination that do not exist in the source (Toggles the `/purge` flag to RoboCopy). If "flags" is
set, this will be ignored.
choices:
- true
- false
default: false
required: false
flags:
description:
- Directly supply Robocopy flags. If set, purge and recurse will be ignored.
default: None
required: false
author: Corwin Brown (@blakfeld)
notes:
- This is not a complete port of the "synchronize" module. Unlike the "synchronize" module this only performs the sync/copy on the remote machine,
not from the master to the remote machine.
- This module does not currently support all Robocopy flags.
- Works on Windows 7, Windows 8, Windows Server 2k8, and Windows Server 2k12
'''
EXAMPLES = r'''
- name: Sync the contents of one directory to another
win_robocopy:
src: C:\DirectoryOne
dest: C:\DirectoryTwo
- name: Sync the contents of one directory to another, including subdirectories
win_robocopy:
src: C:\DirectoryOne
dest: C:\DirectoryTwo
recurse: True
- name: Sync the contents of one directory to another, and remove any files/directories found in destination that do not exist in the source
win_robocopy:
src: C:\DirectoryOne
dest: C:\DirectoryTwo
purge: True
- name: Sync content in recursive mode, removing any files/directories found in destination that do not exist in the source
win_robocopy:
src: C:\DirectoryOne
dest: C:\DirectoryTwo
recurse: True
purge: True
- name: Sync Two Directories in recursive and purging mode, specifying additional special flags
win_robocopy:
src: C:\DirectoryOne
dest: C:\DirectoryTwo
flags: /E /PURGE /XD SOME_DIR /XF SOME_FILE /MT:32
'''
RETURN = r'''
src:
description: The Source file/directory of the sync.
returned: always
type: string
sample: c:\Some\Path
dest:
description: The Destination file/directory of the sync.
returned: always
type: string
sample: c:\Some\Path
recurse:
description: Whether or not the recurse flag was toggled.
returned: always
type: bool
sample: False
purge:
description: Whether or not the purge flag was toggled.
returned: always
type: bool
sample: False
flags:
description: Any flags passed in by the user.
returned: always
type: string
sample: "/e /purge"
rc:
    description: The return code returned by robocopy.
returned: success
type: int
sample: 1
output:
description: The output of running the robocopy command.
returned: success
type: string
sample: "----------------------------------------\n ROBOCOPY :: Robust File Copy for Windows \n----------------------------------------\n"
msg:
description: Output intrepreted into a concise message.
returned: always
type: string
sample: No files copied!
changed:
description: Whether or not any changes were made.
returned: always
type: bool
sample: False
'''
| gpl-3.0 |
ganeshgore/myremolab | server/src/build/lib.linux-i686-2.7/weblab/core/coordinator/sql/post_reservation.py | 2 | 3783 | #!/usr/bin/env python
#-*-*- encoding: utf-8 -*-*-
#
# Copyright (C) 2005 onwards University of Deusto
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
# This software consists of contributions made by many individuals,
# listed below:
#
# Author: Pablo Orduña <[email protected]>
#
import datetime
import traceback
from sqlalchemy.exc import IntegrityError, ConcurrentModificationError
from voodoo.typechecker import typecheck
from weblab.core.coordinator.sql.model import PostReservationRetrievedData
import weblab.core.coordinator.status as WSS
class PostReservationDataManager(object):
def __init__(self, session_maker, time_provider):
self._session_maker = session_maker
self.time_provider = time_provider
@typecheck(basestring, datetime.datetime, datetime.datetime, basestring)
def create(self, reservation_id, date, expiration_date, initial_data):
session = self._session_maker()
try:
registry = PostReservationRetrievedData(reservation_id = reservation_id, finished = False, date = date, expiration_date = expiration_date, initial_data = initial_data, end_data = None)
session.add(registry)
session.commit()
finally:
session.close()
def delete(self, reservation_id):
session = self._session_maker()
try:
reservation = session.query(PostReservationRetrievedData).filter(PostReservationRetrievedData.reservation_id == reservation_id).first()
if reservation is None:
return
session.delete(reservation)
session.commit()
finally:
session.close()
def finish(self, reservation_id, end_data):
session = self._session_maker()
try:
reservation = session.query(PostReservationRetrievedData).filter(PostReservationRetrievedData.reservation_id == reservation_id).first()
if reservation is None:
return
reservation.finished = True
reservation.end_data = end_data
session.add(reservation)
session.commit()
finally:
session.close()
def find(self, reservation_id):
session = self._session_maker()
try:
reservation = session.query(PostReservationRetrievedData).filter(PostReservationRetrievedData.reservation_id == reservation_id).first()
if reservation is None:
return None
return WSS.PostReservationStatus(reservation_id, reservation.finished, reservation.initial_data, reservation.end_data)
finally:
session.close()
##############################################################
#
# Clean expired PostReservationRetrievedData
#
def clean_expired(self):
session = self._session_maker()
try:
found = False
for expired_data in session.query(PostReservationRetrievedData).filter(PostReservationRetrievedData.expiration_date < self.time_provider.get_datetime()).all():
session.delete(expired_data)
found = True
if found:
try:
session.commit()
except (ConcurrentModificationError, IntegrityError):
# Somebody else did it
traceback.print_exc()
finally:
session.close()
def _clean(self):
session = self._session_maker()
try:
for registry in session.query(PostReservationRetrievedData).all():
session.delete(registry)
session.commit()
finally:
session.close()
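if __name__ == '__main__':
    # Illustrative wiring sketch only (an assumption, not part of WebLab):
    # requires SQLAlchemy and presumes the coordinator tables already exist
    # behind the placeholder database URL below.
    from sqlalchemy import create_engine
    from sqlalchemy.orm import sessionmaker
    class _UtcTimeProvider(object):
        def get_datetime(self):
            return datetime.datetime.utcnow()
    manager = PostReservationDataManager(
        sessionmaker(bind=create_engine('sqlite:///coordinator.db')),
        _UtcTimeProvider())
    manager.clean_expired()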
| bsd-2-clause |
nikhilprathapani/python-for-android | python3-alpha/python3-src/Lib/test/test_codecencodings_cn.py | 55 | 3144 | #!/usr/bin/env python3
#
# test_codecencodings_cn.py
# Codec encoding tests for PRC encodings.
#
from test import support
from test import test_multibytecodec_support
import unittest
class Test_GB2312(test_multibytecodec_support.TestBase, unittest.TestCase):
encoding = 'gb2312'
tstring = test_multibytecodec_support.load_teststring('gb2312')
codectests = (
# invalid bytes
(b"abc\x81\x81\xc1\xc4", "strict", None),
(b"abc\xc8", "strict", None),
(b"abc\x81\x81\xc1\xc4", "replace", "abc\ufffd\u804a"),
(b"abc\x81\x81\xc1\xc4\xc8", "replace", "abc\ufffd\u804a\ufffd"),
(b"abc\x81\x81\xc1\xc4", "ignore", "abc\u804a"),
(b"\xc1\x64", "strict", None),
)
class Test_GBK(test_multibytecodec_support.TestBase, unittest.TestCase):
encoding = 'gbk'
tstring = test_multibytecodec_support.load_teststring('gbk')
codectests = (
# invalid bytes
(b"abc\x80\x80\xc1\xc4", "strict", None),
(b"abc\xc8", "strict", None),
(b"abc\x80\x80\xc1\xc4", "replace", "abc\ufffd\u804a"),
(b"abc\x80\x80\xc1\xc4\xc8", "replace", "abc\ufffd\u804a\ufffd"),
(b"abc\x80\x80\xc1\xc4", "ignore", "abc\u804a"),
(b"\x83\x34\x83\x31", "strict", None),
("\u30fb", "strict", None),
)
class Test_GB18030(test_multibytecodec_support.TestBase, unittest.TestCase):
encoding = 'gb18030'
tstring = test_multibytecodec_support.load_teststring('gb18030')
codectests = (
# invalid bytes
(b"abc\x80\x80\xc1\xc4", "strict", None),
(b"abc\xc8", "strict", None),
(b"abc\x80\x80\xc1\xc4", "replace", "abc\ufffd\u804a"),
(b"abc\x80\x80\xc1\xc4\xc8", "replace", "abc\ufffd\u804a\ufffd"),
(b"abc\x80\x80\xc1\xc4", "ignore", "abc\u804a"),
(b"abc\x84\x39\x84\x39\xc1\xc4", "replace", "abc\ufffd\u804a"),
("\u30fb", "strict", b"\x819\xa79"),
)
has_iso10646 = True
class Test_HZ(test_multibytecodec_support.TestBase, unittest.TestCase):
encoding = 'hz'
tstring = test_multibytecodec_support.load_teststring('hz')
codectests = (
# test '~\n' (3 lines)
(b'This sentence is in ASCII.\n'
b'The next sentence is in GB.~{<:Ky2;S{#,~}~\n'
b'~{NpJ)l6HK!#~}Bye.\n',
'strict',
'This sentence is in ASCII.\n'
'The next sentence is in GB.'
'\u5df1\u6240\u4e0d\u6b32\uff0c\u52ff\u65bd\u65bc\u4eba\u3002'
'Bye.\n'),
# test '~\n' (4 lines)
(b'This sentence is in ASCII.\n'
b'The next sentence is in GB.~\n'
b'~{<:Ky2;S{#,NpJ)l6HK!#~}~\n'
b'Bye.\n',
'strict',
'This sentence is in ASCII.\n'
'The next sentence is in GB.'
'\u5df1\u6240\u4e0d\u6b32\uff0c\u52ff\u65bd\u65bc\u4eba\u3002'
'Bye.\n'),
# invalid bytes
(b'ab~cd', 'replace', 'ab\uFFFDd'),
(b'ab\xffcd', 'replace', 'ab\uFFFDcd'),
(b'ab~{\x81\x81\x41\x44~}cd', 'replace', 'ab\uFFFD\uFFFD\u804Acd'),
)
def test_main():
support.run_unittest(__name__)
if __name__ == "__main__":
test_main()
| apache-2.0 |
klim-iv/phantomjs-qt5 | src/webkit/Tools/Scripts/webkitpy/common/net/buildbot/buildbot.py | 125 | 22854 | # Copyright (c) 2009, Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import json
import operator
import re
import urllib
import urllib2
import webkitpy.common.config.urls as config_urls
from webkitpy.common.memoized import memoized
from webkitpy.common.net.failuremap import FailureMap
from webkitpy.common.net.layouttestresults import LayoutTestResults
from webkitpy.common.net.networktransaction import NetworkTransaction
from webkitpy.common.net.regressionwindow import RegressionWindow
from webkitpy.common.system.logutils import get_logger
from webkitpy.thirdparty.BeautifulSoup import BeautifulSoup
_log = get_logger(__file__)
class Builder(object):
def __init__(self, name, buildbot):
self._name = name
self._buildbot = buildbot
self._builds_cache = {}
self._revision_to_build_number = None
from webkitpy.thirdparty.autoinstalled.mechanize import Browser
self._browser = Browser()
self._browser.set_handle_robots(False) # The builder pages are excluded by robots.txt
def name(self):
return self._name
def results_url(self):
return "%s/results/%s" % (self._buildbot.buildbot_url, self.url_encoded_name())
# In addition to per-build results, the build.chromium.org builders also
# keep a directory that accumulates test results over many runs.
def accumulated_results_url(self):
return None
def latest_layout_test_results_url(self):
return self.accumulated_results_url() or self.latest_cached_build().results_url();
@memoized
def latest_layout_test_results(self):
return self.fetch_layout_test_results(self.latest_layout_test_results_url())
def _fetch_file_from_results(self, results_url, file_name):
# It seems this can return None if the url redirects and then returns 404.
result = urllib2.urlopen("%s/%s" % (results_url, file_name))
if not result:
return None
# urlopen returns a file-like object which sometimes works fine with str()
# but sometimes is a addinfourl object. In either case calling read() is correct.
return result.read()
def fetch_layout_test_results(self, results_url):
# FIXME: This should cache that the result was a 404 and stop hitting the network.
results_file = NetworkTransaction(convert_404_to_None=True).run(lambda: self._fetch_file_from_results(results_url, "full_results.json"))
return LayoutTestResults.results_from_string(results_file)
def url_encoded_name(self):
return urllib.quote(self._name)
def url(self):
return "%s/builders/%s" % (self._buildbot.buildbot_url, self.url_encoded_name())
# This provides a single place to mock
def _fetch_build(self, build_number):
build_dictionary = self._buildbot._fetch_build_dictionary(self, build_number)
if not build_dictionary:
return None
revision_string = build_dictionary['sourceStamp']['revision']
return Build(self,
build_number=int(build_dictionary['number']),
# 'revision' may be None if a trunk build was started by the force-build button on the web page.
revision=(int(revision_string) if revision_string else None),
                     # Buildbot uses any number other than 0 to mean fail. Since we fetch with
# filter=1, passing builds may contain no 'results' value.
is_green=(not build_dictionary.get('results')),
)
def build(self, build_number):
if not build_number:
return None
cached_build = self._builds_cache.get(build_number)
if cached_build:
return cached_build
build = self._fetch_build(build_number)
self._builds_cache[build_number] = build
return build
def latest_cached_build(self):
revision_build_pairs = self.revision_build_pairs_with_results()
revision_build_pairs.sort(key=lambda i: i[1])
latest_build_number = revision_build_pairs[-1][1]
return self.build(latest_build_number)
def force_build(self, username="webkit-patch", comments=None):
def predicate(form):
try:
return form.find_control("username")
except Exception, e:
return False
# ignore false positives for missing Browser methods - pylint: disable=E1102
self._browser.open(self.url())
self._browser.select_form(predicate=predicate)
self._browser["username"] = username
if comments:
self._browser["comments"] = comments
return self._browser.submit()
file_name_regexp = re.compile(r"r(?P<revision>\d+) \((?P<build_number>\d+)\)")
def _revision_and_build_for_filename(self, filename):
# Example: "r47483 (1)/" or "r47483 (1).zip"
match = self.file_name_regexp.match(filename)
if not match:
return None
return (int(match.group("revision")), int(match.group("build_number")))
def _fetch_revision_to_build_map(self):
# All _fetch requests go through _buildbot for easier mocking
# FIXME: This should use NetworkTransaction's 404 handling instead.
try:
# FIXME: This method is horribly slow due to the huge network load.
# FIXME: This is a poor way to do revision -> build mapping.
# Better would be to ask buildbot through some sort of API.
print "Loading revision/build list from %s." % self.results_url()
print "This may take a while..."
result_files = self._buildbot._fetch_twisted_directory_listing(self.results_url())
except urllib2.HTTPError, error:
if error.code != 404:
raise
_log.debug("Revision/build list failed to load.")
result_files = []
return dict(self._file_info_list_to_revision_to_build_list(result_files))
def _file_info_list_to_revision_to_build_list(self, file_info_list):
# This assumes there was only one build per revision, which is false but we don't care for now.
revisions_and_builds = []
for file_info in file_info_list:
revision_and_build = self._revision_and_build_for_filename(file_info["filename"])
if revision_and_build:
revisions_and_builds.append(revision_and_build)
return revisions_and_builds
def _revision_to_build_map(self):
if not self._revision_to_build_number:
self._revision_to_build_number = self._fetch_revision_to_build_map()
return self._revision_to_build_number
def revision_build_pairs_with_results(self):
return self._revision_to_build_map().items()
# This assumes there can be only one build per revision, which is false, but we don't care for now.
def build_for_revision(self, revision, allow_failed_lookups=False):
# NOTE: This lookup will fail if that exact revision was never built.
build_number = self._revision_to_build_map().get(int(revision))
if not build_number:
return None
build = self.build(build_number)
if not build and allow_failed_lookups:
            # Builds for old revisions will fail to look up via buildbot's JSON API.
build = Build(self,
build_number=build_number,
revision=revision,
is_green=False,
)
return build
def find_regression_window(self, red_build, look_back_limit=30):
if not red_build or red_build.is_green():
return RegressionWindow(None, None)
common_failures = None
current_build = red_build
build_after_current_build = None
look_back_count = 0
while current_build:
if current_build.is_green():
# current_build can't possibly have any failures in common
# with red_build because it's green.
break
results = current_build.layout_test_results()
# We treat a lack of results as if all the test failed.
# This occurs, for example, when we can't compile at all.
if results:
failures = set(results.failing_tests())
                if common_failures is None:
common_failures = failures
else:
common_failures = common_failures.intersection(failures)
if not common_failures:
# current_build doesn't have any failures in common with
# the red build we're worried about. We assume that any
# failures in current_build were due to flakiness.
break
look_back_count += 1
if look_back_count > look_back_limit:
return RegressionWindow(None, current_build, failing_tests=common_failures)
build_after_current_build = current_build
current_build = current_build.previous_build()
# We must iterate at least once because red_build is red.
assert(build_after_current_build)
# Current build must either be green or have no failures in common
# with red build, so we've found our failure transition.
return RegressionWindow(current_build, build_after_current_build, failing_tests=common_failures)
def find_blameworthy_regression_window(self, red_build_number, look_back_limit=30, avoid_flakey_tests=True):
red_build = self.build(red_build_number)
regression_window = self.find_regression_window(red_build, look_back_limit)
if not regression_window.build_before_failure():
return None # We ran off the limit of our search
# If avoid_flakey_tests, require at least 2 bad builds before we
# suspect a real failure transition.
if avoid_flakey_tests and regression_window.failing_build() == red_build:
return None
return regression_window
class Build(object):
def __init__(self, builder, build_number, revision, is_green):
self._builder = builder
self._number = build_number
self._revision = revision
self._is_green = is_green
@staticmethod
def build_url(builder, build_number):
return "%s/builds/%s" % (builder.url(), build_number)
def url(self):
return self.build_url(self.builder(), self._number)
def results_url(self):
results_directory = "r%s (%s)" % (self.revision(), self._number)
return "%s/%s" % (self._builder.results_url(), urllib.quote(results_directory))
def results_zip_url(self):
return "%s.zip" % self.results_url()
@memoized
def layout_test_results(self):
return self._builder.fetch_layout_test_results(self.results_url())
def builder(self):
return self._builder
def revision(self):
return self._revision
def is_green(self):
return self._is_green
def previous_build(self):
# previous_build() allows callers to avoid assuming build numbers are sequential.
# They may not be sequential across all master changes, or when non-trunk builds are made.
return self._builder.build(self._number - 1)
class BuildBot(object):
_builder_factory = Builder
_default_url = config_urls.buildbot_url
def __init__(self, url=None):
self.buildbot_url = url if url else self._default_url
self._builder_by_name = {}
def _parse_last_build_cell(self, builder, cell):
status_link = cell.find('a')
if status_link:
# Will be either a revision number or a build number
revision_string = status_link.string
# If revision_string has non-digits assume it's not a revision number.
builder['built_revision'] = int(revision_string) \
if not re.match('\D', revision_string) \
else None
            # FIXME: We treat slave lost as green even though it is not, to
            # work around the Qt bot being on a broken internet connection.
# The real fix is https://bugs.webkit.org/show_bug.cgi?id=37099
            builder['is_green'] = not re.search('fail', cell.renderContents()) or \
                bool(re.search('lost', cell.renderContents()))
status_link_regexp = r"builders/(?P<builder_name>.*)/builds/(?P<build_number>\d+)"
link_match = re.match(status_link_regexp, status_link['href'])
builder['build_number'] = int(link_match.group("build_number"))
else:
# We failed to find a link in the first cell, just give up. This
# can happen if a builder is just-added, the first cell will just
# be "no build"
# Other parts of the code depend on is_green being present.
builder['is_green'] = False
builder['built_revision'] = None
builder['build_number'] = None
def _parse_current_build_cell(self, builder, cell):
activity_lines = cell.renderContents().split("<br />")
builder["activity"] = activity_lines[0] # normally "building" or "idle"
# The middle lines document how long left for any current builds.
match = re.match("(?P<pending_builds>\d) pending", activity_lines[-1])
builder["pending_builds"] = int(match.group("pending_builds")) if match else 0
def _parse_builder_status_from_row(self, status_row):
status_cells = status_row.findAll('td')
builder = {}
# First cell is the name
name_link = status_cells[0].find('a')
builder["name"] = unicode(name_link.string)
self._parse_last_build_cell(builder, status_cells[1])
self._parse_current_build_cell(builder, status_cells[2])
return builder
def _matches_regexps(self, builder_name, name_regexps):
for name_regexp in name_regexps:
if re.match(name_regexp, builder_name):
return True
return False
# FIXME: This method needs to die, but is used by a unit test at the moment.
def _builder_statuses_with_names_matching_regexps(self, builder_statuses, name_regexps):
return [builder for builder in builder_statuses if self._matches_regexps(builder["name"], name_regexps)]
# FIXME: These _fetch methods should move to a networking class.
def _fetch_build_dictionary(self, builder, build_number):
# Note: filter=1 will remove None and {} and '', which cuts noise but can
# cause keys to be missing which you might otherwise expect.
# FIXME: The bot sends a *huge* amount of data for each request, we should
# find a way to reduce the response size further.
json_url = "%s/json/builders/%s/builds/%s?filter=1" % (self.buildbot_url, urllib.quote(builder.name()), build_number)
try:
return json.load(urllib2.urlopen(json_url))
except urllib2.URLError, err:
build_url = Build.build_url(builder, build_number)
_log.error("Error fetching data for %s build %s (%s, json: %s): %s" % (builder.name(), build_number, build_url, json_url, err))
return None
except ValueError, err:
build_url = Build.build_url(builder, build_number)
_log.error("Error decoding json data from %s: %s" % (build_url, err))
return None
def _fetch_one_box_per_builder(self):
build_status_url = "%s/one_box_per_builder" % self.buildbot_url
return urllib2.urlopen(build_status_url)
def _file_cell_text(self, file_cell):
"""Traverses down through firstChild elements until one containing a string is found, then returns that string"""
element = file_cell
while element.string is None and element.contents:
element = element.contents[0]
return element.string
def _parse_twisted_file_row(self, file_row):
string_or_empty = lambda string: unicode(string) if string else u""
file_cells = file_row.findAll('td')
return {
"filename": string_or_empty(self._file_cell_text(file_cells[0])),
"size": string_or_empty(self._file_cell_text(file_cells[1])),
"type": string_or_empty(self._file_cell_text(file_cells[2])),
"encoding": string_or_empty(self._file_cell_text(file_cells[3])),
}
def _parse_twisted_directory_listing(self, page):
soup = BeautifulSoup(page)
# HACK: Match only table rows with a class to ignore twisted header/footer rows.
file_rows = soup.find('table').findAll('tr', {'class': re.compile(r'\b(?:directory|file)\b')})
return [self._parse_twisted_file_row(file_row) for file_row in file_rows]
# FIXME: There should be a better way to get this information directly from twisted.
def _fetch_twisted_directory_listing(self, url):
return self._parse_twisted_directory_listing(urllib2.urlopen(url))
def builders(self):
return [self.builder_with_name(status["name"]) for status in self.builder_statuses()]
    # This method pulls from /one_box_per_builder as an efficient way to get information about all builders at once.
def builder_statuses(self):
soup = BeautifulSoup(self._fetch_one_box_per_builder())
return [self._parse_builder_status_from_row(status_row) for status_row in soup.find('table').findAll('tr')]
def builder_with_name(self, name):
builder = self._builder_by_name.get(name)
if not builder:
builder = self._builder_factory(name, self)
self._builder_by_name[name] = builder
return builder
def failure_map(self):
failure_map = FailureMap()
revision_to_failing_bots = {}
for builder_status in self.builder_statuses():
if builder_status["is_green"]:
continue
builder = self.builder_with_name(builder_status["name"])
regression_window = builder.find_blameworthy_regression_window(builder_status["build_number"])
if regression_window:
failure_map.add_regression_window(builder, regression_window)
return failure_map
# This makes fewer requests than calling Builder.latest_build would. It grabs all builder
# statuses in one request using self.builder_statuses (fetching /one_box_per_builder instead of builder pages).
def _latest_builds_from_builders(self):
builder_statuses = self.builder_statuses()
return [self.builder_with_name(status["name"]).build(status["build_number"]) for status in builder_statuses]
def _build_at_or_before_revision(self, build, revision):
while build:
if build.revision() <= revision:
return build
build = build.previous_build()
def _fetch_builder_page(self, builder):
builder_page_url = "%s/builders/%s?numbuilds=100" % (self.buildbot_url, urllib2.quote(builder.name()))
return urllib2.urlopen(builder_page_url)
def _revisions_for_builder(self, builder):
soup = BeautifulSoup(self._fetch_builder_page(builder))
revisions = []
for status_row in soup.find('table').findAll('tr'):
revision_anchor = status_row.find('a')
table_cells = status_row.findAll('td')
if not table_cells or len(table_cells) < 3 or not table_cells[2].string:
continue
if revision_anchor and revision_anchor.string and re.match(r'^\d+$', revision_anchor.string):
revisions.append((int(revision_anchor.string), 'success' in table_cells[2].string))
return revisions
def _find_green_revision(self, builder_revisions):
revision_statuses = {}
for builder in builder_revisions:
for revision, succeeded in builder_revisions[builder]:
revision_statuses.setdefault(revision, set())
                if succeeded and revision_statuses[revision] is not None:
revision_statuses[revision].add(builder)
else:
revision_statuses[revision] = None
# In descending order, look for a revision X with successful builds
# Once we found X, check if remaining builders succeeded in the neighborhood of X.
revisions_in_order = sorted(revision_statuses.keys(), reverse=True)
for i, revision in enumerate(revisions_in_order):
if not revision_statuses[revision]:
continue
builders_succeeded_in_future = set()
for future_revision in sorted(revisions_in_order[:i + 1]):
if not revision_statuses[future_revision]:
break
builders_succeeded_in_future = builders_succeeded_in_future.union(revision_statuses[future_revision])
builders_succeeded_in_past = set()
for past_revision in revisions_in_order[i:]:
if not revision_statuses[past_revision]:
break
builders_succeeded_in_past = builders_succeeded_in_past.union(revision_statuses[past_revision])
if len(builders_succeeded_in_future) == len(builder_revisions) and len(builders_succeeded_in_past) == len(builder_revisions):
return revision
return None
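if __name__ == '__main__':
    # Offline sanity sketch (an assumption, not part of webkitpy): exercises
    # the pure green-revision logic with made-up builder histories, so no
    # network access to a buildbot master is needed.
    bb = BuildBot()
    synthetic = {
        'Bot A': [(105, True), (104, True), (103, False)],
        'Bot B': [(105, False), (104, True), (103, True)],
    }
    print "Latest green revision:", bb._find_green_revision(synthetic)  # -> 104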
| bsd-3-clause |
mackjoner/flask-restful | flask_restful/paging.py | 51 | 1207 | from flask_restful.utils.crypto import decrypt, encrypt
DEFAULT_PAGE_SIZE = 50
def retrieve_next_page(key, seed, args, callback, initial_bookmark=None):
"""
A helper for the bookmark pager.
:param key: the private key of you API
:param seed: the crypo seed for this session
:param args: the verbatim filtering+paging arguments
:param callback: a function that takes (a dictionary of filters, the current bookmark, the page size)
and return a tuple (next_results, dictionary_ready_for_next_iteration, approx_number_of_element_left)
:param initial_bookmark: pass here an optional initial bookmark for the first request
:return: the tuple result_list and new encrypted bookmark
"""
filter = dict(args)
if 'pager_info' in filter:
initial_bookmark = decrypt(filter.pop('pager_info'), key, seed)
page_size = filter.pop('page_size', DEFAULT_PAGE_SIZE)
result_list, new_bookmark, approx_result_size = callback(filter, initial_bookmark, page_size)
# restore for the next iteration
filter['pager_info'] = encrypt(new_bookmark, key, seed)
filter['page_size'] = page_size
return result_list, filter, approx_result_size
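if __name__ == '__main__':
    # Rough offline sketch (an assumption, not part of Flask-RESTful): a fake
    # callback paging through an in-memory list. The 32-byte key and 16-byte
    # seed below are placeholders sized for flask_restful.utils.crypto.
    DATA = list(range(120))
    def fake_callback(filters, bookmark, page_size):
        start = int(bookmark) if bookmark else 0
        end = start + int(page_size)
        return DATA[start:end], str(end), max(len(DATA) - end, 0)
    results, next_args, left = retrieve_next_page(
        '0123456789abcdef0123456789abcdef', 'fedcba9876543210',
        {'page_size': 25}, fake_callback)
    print(results[:3], left)  # first slice of page one, approximate remainder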
| bsd-3-clause |
kylerbrown/scikit-learn | examples/feature_selection/plot_rfe_with_cross_validation.py | 226 | 1384 | """
===================================================
Recursive feature elimination with cross-validation
===================================================
A recursive feature elimination example with automatic tuning of the
number of features selected with cross-validation.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.cross_validation import StratifiedKFold
from sklearn.feature_selection import RFECV
from sklearn.datasets import make_classification
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000, n_features=25, n_informative=3,
n_redundant=2, n_repeated=0, n_classes=8,
n_clusters_per_class=1, random_state=0)
# Create the RFE object and compute a cross-validated score.
svc = SVC(kernel="linear")
# The "accuracy" scoring is proportional to the number of correct
# classifications
rfecv = RFECV(estimator=svc, step=1, cv=StratifiedKFold(y, 2),
scoring='accuracy')
rfecv.fit(X, y)
print("Optimal number of features : %d" % rfecv.n_features_)
# Plot number of features VS. cross-validation scores
plt.figure()
plt.xlabel("Number of features selected")
plt.ylabel("Cross validation score (nb of correct classifications)")
plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_)
plt.show()
| bsd-3-clause |
WoLpH/CouchPotatoServer | libs/pyutil/scripts/randfile.py | 106 | 1948 | #!/usr/bin/env python
import os, sys
from random import randrange
import argparse
def main():
CHUNKSIZE=2**20
parser = argparse.ArgumentParser(prog="randfile", description="Create a file of pseudorandom bytes (not cryptographically secure).")
parser.add_argument('-b', '--num-bytes', help="how many bytes to write per output file (default 20)", type=int, metavar="BYTES", default=20)
    parser.add_argument('-f', '--output-file-prefix', help="prefix of the name of the output file to create and fill with random bytes (default \"randfile\")", metavar="OUTFILEPRE", default="randfile")
parser.add_argument('-n', '--num-files', help="how many files to write (default 1)", type=int, metavar="FILES", default=1)
parser.add_argument('-F', '--force', help='overwrite any file already present', action='store_true')
parser.add_argument('-p', '--progress', help='write an "x" for every file completed and a "." for every %d bytes' % CHUNKSIZE, action='store_true')
args = parser.parse_args()
for i in xrange(args.num_files):
bytesleft = args.num_bytes
outputfname = args.output_file_prefix + "." + str(i)
if args.force:
f = open(outputfname, "wb")
else:
flags = os.O_WRONLY|os.O_CREAT|os.O_EXCL | (hasattr(os, 'O_BINARY') and os.O_BINARY)
fd = os.open(outputfname, flags)
f = os.fdopen(fd, "wb")
zs = [0]*CHUNKSIZE
ts = [256]*CHUNKSIZE
while bytesleft >= CHUNKSIZE:
f.write(''.join(map(chr, map(randrange, zs, ts))))
bytesleft -= CHUNKSIZE
if args.progress:
sys.stdout.write(".") ; sys.stdout.flush()
zs = [0]*bytesleft
ts = [256]*bytesleft
f.write(''.join(map(chr, map(randrange, zs, ts))))
if args.progress:
sys.stdout.write("x") ; sys.stdout.flush()
if __name__ == "__main__":
main()
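# Example invocations (illustrative):
#   randfile                          -> writes randfile.0 containing 20 random bytes
#   randfile -n 3 -b 1048576 -f rand -F -p
#                                     -> rand.0 .. rand.2, 1 MiB each, overwriting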
| gpl-3.0 |
Yongliangdu/ThinkStats2 | code/chap02soln.py | 69 | 2263 | """This file contains code for use with "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import sys
from operator import itemgetter
import first
import thinkstats2
def Mode(hist):
"""Returns the value with the highest frequency.
hist: Hist object
returns: value from Hist
"""
p, x = max([(p, x) for x, p in hist.Items()])
return x
def AllModes(hist):
"""Returns value-freq pairs in decreasing order of frequency.
hist: Hist object
returns: iterator of value-freq pairs
"""
return sorted(hist.Items(), key=itemgetter(1), reverse=True)
def WeightDifference(live, firsts, others):
"""Explore the difference in weight between first babies and others.
live: DataFrame of all live births
firsts: DataFrame of first babies
others: DataFrame of others
"""
mean0 = live.totalwgt_lb.mean()
mean1 = firsts.totalwgt_lb.mean()
mean2 = others.totalwgt_lb.mean()
var1 = firsts.totalwgt_lb.var()
var2 = others.totalwgt_lb.var()
print('Mean')
print('First babies', mean1)
print('Others', mean2)
print('Variance')
print('First babies', var1)
print('Others', var2)
print('Difference in lbs', mean1 - mean2)
print('Difference in oz', (mean1 - mean2) * 16)
print('Difference relative to mean (%age points)',
(mean1 - mean2) / mean0 * 100)
d = thinkstats2.CohenEffectSize(firsts.totalwgt_lb, others.totalwgt_lb)
print('Cohen d', d)
def main(script):
"""Tests the functions in this module.
script: string script name
"""
live, firsts, others = first.MakeFrames()
hist = thinkstats2.Hist(live.prglngth)
# explore the weight difference between first babies and others
WeightDifference(live, firsts, others)
# test Mode
mode = Mode(hist)
print('Mode of preg length', mode)
assert(mode == 39)
# test AllModes
modes = AllModes(hist)
assert(modes[0][1] == 4693)
for value, freq in modes[:5]:
print(value, freq)
print('%s: All tests passed.' % script)
if __name__ == '__main__':
main(*sys.argv)
| gpl-3.0 |
dajusc/trimesh | trimesh/voxel/ops.py | 1 | 12278 | import numpy as np
from .. import util
from ..constants import log
def fill_orthographic(dense):
shape = dense.shape
indices = np.stack(
np.meshgrid(*(np.arange(s) for s in shape), indexing='ij'),
axis=-1)
empty = np.logical_not(dense)
def fill_axis(axis):
base_local_indices = indices[..., axis]
local_indices = base_local_indices.copy()
local_indices[empty] = shape[axis]
mins = np.min(local_indices, axis=axis, keepdims=True)
local_indices = base_local_indices.copy()
local_indices[empty] = -1
maxs = np.max(local_indices, axis=axis, keepdims=True)
return np.logical_and(
base_local_indices >= mins,
base_local_indices <= maxs,
)
filled = fill_axis(axis=0)
for axis in range(1, len(shape)):
filled = np.logical_and(filled, fill_axis(axis))
return filled
def fill_base(sparse_indices):
"""
Given a sparse surface voxelization, fill in between columns.
Parameters
--------------
sparse_indices: (n, 3) int, location of filled cells
Returns
--------------
filled: (m, 3) int, location of filled cells
"""
# validate inputs
sparse_indices = np.asanyarray(sparse_indices, dtype=np.int64)
if not util.is_shape(sparse_indices, (-1, 3)):
raise ValueError('incorrect shape')
# create grid and mark inner voxels
max_value = sparse_indices.max() + 3
grid = np.zeros((max_value,
max_value,
max_value),
bool)
voxels_sparse = np.add(sparse_indices, 1)
grid[tuple(voxels_sparse.T)] = 1
for i in range(max_value):
check_dir2 = False
for j in range(0, max_value - 1):
idx = []
# find transitions first
# transition positions are from 0 to 1 and from 1 to 0
eq = np.equal(grid[i, j, :-1], grid[i, j, 1:])
idx = np.where(np.logical_not(eq))[0] + 1
c = len(idx)
check_dir2 = (c % 4) > 0 and c > 4
if c < 4:
continue
for s in range(0, c - c % 4, 4):
grid[i, j, idx[s]:idx[s + 3]] = 1
if not check_dir2:
continue
# check another direction for robustness
for k in range(0, max_value - 1):
idx = []
# find transitions first
eq = np.equal(grid[i, :-1, k], grid[i, 1:, k])
idx = np.where(np.logical_not(eq))[0] + 1
c = len(idx)
if c < 4:
continue
for s in range(0, c - c % 4, 4):
grid[i, idx[s]:idx[s + 3], k] = 1
# generate new voxels
filled = np.column_stack(np.where(grid))
filled -= 1
return filled
fill_voxelization = fill_base
def matrix_to_marching_cubes(matrix, pitch=1.0):
"""
Convert an (n, m, p) matrix into a mesh, using marching_cubes.
Parameters
-----------
    matrix : (n, m, p) bool
      Occupancy array
    pitch : float or (3,) float
      Edge length of voxels in each dimension
Returns
----------
mesh : trimesh.Trimesh
Mesh generated by meshing voxels using
the marching cubes algorithm in skimage
"""
from skimage import measure
from ..base import Trimesh
matrix = np.asanyarray(matrix, dtype=np.bool)
    rev_matrix = np.logical_not(matrix)  # mesh the complement so face normals point outward
# Add in padding so marching cubes can function properly with
# voxels on edge of AABB
pad_width = 1
rev_matrix = np.pad(rev_matrix,
pad_width=(pad_width),
mode='constant',
constant_values=(1))
# pick between old and new API
if hasattr(measure, 'marching_cubes_lewiner'):
func = measure.marching_cubes_lewiner
else:
func = measure.marching_cubes
# Run marching cubes.
pitch = np.asanyarray(pitch)
if pitch.size == 1:
pitch = (pitch,) * 3
meshed = func(volume=rev_matrix,
level=.5, # it is a boolean voxel grid
spacing=pitch)
# allow results from either marching cubes function in skimage
# binaries available for python 3.3 and 3.4 appear to use the classic
# method
if len(meshed) == 2:
log.warning('using old marching cubes, may not be watertight!')
vertices, faces = meshed
normals = None
elif len(meshed) == 4:
vertices, faces, normals, vals = meshed
# Return to the origin, add in the pad_width
vertices = np.subtract(vertices, pad_width * pitch)
# create the mesh
mesh = Trimesh(vertices=vertices,
faces=faces,
vertex_normals=normals)
return mesh
def sparse_to_matrix(sparse):
"""
Take a sparse (n,3) list of integer indexes of filled cells,
turn it into a dense (m,o,p) matrix.
Parameters
-----------
sparse : (n, 3) int
Index of filled cells
Returns
------------
dense : (m, o, p) bool
Matrix of filled cells
"""
sparse = np.asanyarray(sparse, dtype=np.int)
if not util.is_shape(sparse, (-1, 3)):
raise ValueError('sparse must be (n,3)!')
shape = sparse.max(axis=0) + 1
matrix = np.zeros(np.product(shape), dtype=np.bool)
multiplier = np.array([np.product(shape[1:]), shape[2], 1])
index = (sparse * multiplier).sum(axis=1)
matrix[index] = True
dense = matrix.reshape(shape)
return dense
def points_to_marching_cubes(points, pitch=1.0):
"""
Mesh points by assuming they fill a voxel box, and then
running marching cubes on them
Parameters
------------
points : (n, 3) float
Points in 3D space
Returns
-------------
mesh : trimesh.Trimesh
Points meshed using marching cubes
"""
# make sure inputs are as expected
points = np.asanyarray(points, dtype=np.float64)
pitch = np.asanyarray(pitch, dtype=float)
# find the minimum value of points for origin
origin = points.min(axis=0)
# convert points to occupied voxel cells
index = ((points - origin) / pitch).round().astype(np.int64)
# convert voxel indices to a matrix
matrix = sparse_to_matrix(index)
# run marching cubes on the matrix to generate a mesh
mesh = matrix_to_marching_cubes(matrix, pitch=pitch)
mesh.vertices += origin
return mesh
def multibox(centers, pitch=1.0, colors=None):
"""
Return a Trimesh object with a box at every center.
Doesn't do anything nice or fancy.
Parameters
-----------
centers : (n, 3) float
Center of boxes that are occupied
pitch : float
The edge length of a voxel
colors : (3,) or (4,) or (n,3) or (n, 4) float
Color of boxes
Returns
---------
rough : Trimesh
Mesh object representing inputs
"""
from .. import primitives
from ..base import Trimesh
# get centers as numpy array
centers = np.asanyarray(
centers, dtype=np.float64)
# get a basic box
b = primitives.Box()
# apply the pitch
b.apply_scale(float(pitch))
# tile into one box vertex per center
v = np.tile(
centers,
(1, len(b.vertices))).reshape((-1, 3))
# offset to centers
v += np.tile(b.vertices, (len(centers), 1))
f = np.tile(b.faces, (len(centers), 1))
f += np.tile(
np.arange(len(centers)) * len(b.vertices),
(len(b.faces), 1)).T.reshape((-1, 1))
face_colors = None
if colors is not None:
colors = np.asarray(colors)
if colors.ndim == 1:
colors = colors[None].repeat(len(centers), axis=0)
if colors.ndim == 2 and len(colors) == len(centers):
face_colors = colors.repeat(12, axis=0)
mesh = Trimesh(vertices=v,
faces=f,
face_colors=face_colors)
return mesh
def boolean_sparse(a, b, operation=np.logical_and):
"""
Find common rows between two arrays very quickly
using 3D boolean sparse matrices.
Parameters
-----------
a: (n, d) int, coordinates in space
b: (m, d) int, coordinates in space
operation: numpy operation function, ie:
np.logical_and
np.logical_or
Returns
-----------
coords: (q, d) int, coordinates in space
"""
# 3D sparse arrays, using wrapped scipy.sparse
# pip install sparse
import sparse
# find the bounding box of both arrays
extrema = np.array([a.min(axis=0),
a.max(axis=0),
b.min(axis=0),
b.max(axis=0)])
origin = extrema.min(axis=0) - 1
size = tuple(extrema.ptp(axis=0) + 2)
# put nearby voxel arrays into same shape sparse array
sp_a = sparse.COO((a - origin).T,
data=np.ones(len(a), dtype=np.bool),
shape=size)
sp_b = sparse.COO((b - origin).T,
data=np.ones(len(b), dtype=np.bool),
shape=size)
# apply the logical operation
# get a sparse matrix out
applied = operation(sp_a, sp_b)
# reconstruct the original coordinates
coords = np.column_stack(applied.coords) + origin
return coords
def strip_array(data):
shape = data.shape
ndims = len(shape)
padding = []
slices = []
for dim, size in enumerate(shape):
axis = tuple(range(dim)) + tuple(range(dim + 1, ndims))
filled = np.any(data, axis=axis)
indices, = np.nonzero(filled)
pad_left = indices[0]
pad_right = indices[-1]
padding.append([pad_left, pad_right])
slices.append(slice(pad_left, pad_right))
return data[tuple(slices)], np.array(padding, int)
def indices_to_points(indices, pitch=None, origin=None):
"""
Convert indices of an (n,m,p) matrix into a set of voxel center points.
Parameters
----------
indices: (q, 3) int, index of voxel matrix (n,m,p)
pitch: float, what pitch was the voxel matrix computed with
origin: (3,) float, what is the origin of the voxel matrix
Returns
----------
points: (q, 3) float, list of points
"""
indices = np.asanyarray(indices)
    if indices.shape[1:] != (3,):
        raise ValueError('shape of indices must be (q, 3)')
points = np.array(indices, dtype=np.float64)
if pitch is not None:
points *= float(pitch)
if origin is not None:
origin = np.asanyarray(origin)
if origin.shape != (3,):
raise ValueError('shape of origin must be (3,)')
points += origin
return points
def matrix_to_points(matrix, pitch=None, origin=None):
"""
Convert an (n,m,p) matrix into a set of points for each voxel center.
Parameters
-----------
matrix: (n,m,p) bool, voxel matrix
pitch: float, what pitch was the voxel matrix computed with
origin: (3,) float, what is the origin of the voxel matrix
Returns
----------
points: (q, 3) list of points
"""
indices = np.column_stack(np.nonzero(matrix))
points = indices_to_points(indices=indices,
pitch=pitch,
origin=origin)
return points
def points_to_indices(points, pitch=None, origin=None):
"""
Convert center points of an (n,m,p) matrix into its indices.
Parameters
----------
points : (q, 3) float
Center points of voxel matrix (n,m,p)
pitch : float
What pitch was the voxel matrix computed with
origin : (3,) float
What is the origin of the voxel matrix
Returns
----------
indices : (q, 3) int
List of indices
"""
points = np.array(points, dtype=np.float64)
if points.shape != (points.shape[0], 3):
raise ValueError('shape of points must be (q, 3)')
if origin is not None:
origin = np.asanyarray(origin)
if origin.shape != (3,):
raise ValueError('shape of origin must be (3,)')
points -= origin
if pitch is not None:
points /= pitch
indices = np.round(points).astype(int)
return indices
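if __name__ == '__main__':
    # Small offline sanity demo (an assumption, not part of trimesh):
    # round-trip voxel indices through points and back, then densify.
    idx = np.array([[0, 0, 0], [1, 2, 3], [4, 4, 4]])
    pts = indices_to_points(idx, pitch=0.5, origin=[10.0, 10.0, 10.0])
    back = points_to_indices(pts, pitch=0.5, origin=[10.0, 10.0, 10.0])
    assert np.array_equal(back, idx)
    dense = sparse_to_matrix(idx)
    print(dense.shape)  # -> (5, 5, 5)
    print(dense.sum())  # -> 3 filled cells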
| mit |
abad623/verbalucce | verbalucce/nltk/corpus/reader/toolbox.py | 5 | 2078 | # Natural Language Toolkit: Toolbox Reader
#
# Copyright (C) 2001-2012 NLTK Project
# Author: Greg Aumann <[email protected]>
# Stuart Robinson <[email protected]>
# Steven Bird <[email protected]>
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
"""
Module for reading, writing and manipulating
Toolbox databases and settings fileids.
"""
import os
import re
import codecs
from nltk.toolbox import ToolboxData
from util import *
from api import *
class ToolboxCorpusReader(CorpusReader):
def xml(self, fileids, key=None):
return concat([ToolboxData(path, enc).parse(key=key)
for (path, enc) in self.abspaths(fileids, True)])
def fields(self, fileids, strip=True, unwrap=True, encoding=None,
errors='strict', unicode_fields=None):
return concat([list(ToolboxData(fileid,enc).fields(
strip, unwrap, encoding, errors, unicode_fields))
for (fileid, enc)
in self.abspaths(fileids, include_encoding=True)])
# should probably be done lazily:
def entries(self, fileids, **kwargs):
if 'key' in kwargs:
key = kwargs['key']
del kwargs['key']
else:
key = 'lx' # the default key in MDF
entries = []
for marker, contents in self.fields(fileids, **kwargs):
if marker == key:
entries.append((contents, []))
else:
try:
entries[-1][-1].append((marker, contents))
except IndexError:
pass
return entries
def words(self, fileids, key='lx'):
return [contents for marker, contents in self.fields(fileids) if marker == key]
def raw(self, fileids):
if fileids is None: fileids = self._fileids
elif isinstance(fileids, basestring): fileids = [fileids]
return concat([self.open(f).read() for f in fileids])
def demo():
    # Smoke demo; assumes NLTK's toolbox sample data has been downloaded.
    from nltk.corpus import toolbox
    print toolbox.entries('rotokas.dic')[0]
if __name__ == '__main__':
demo()
| apache-2.0 |
peterm-itr/edx-platform | lms/djangoapps/open_ended_grading/views.py | 9 | 13661 | import logging
from django.views.decorators.cache import cache_control
from edxmako.shortcuts import render_to_response
from django.core.urlresolvers import reverse
from courseware.courses import get_course_with_access
from xmodule.open_ended_grading_classes.grading_service_module import GradingServiceError
import json
from student.models import unique_id_for_user
import open_ended_notifications
from xmodule.modulestore.django import modulestore
from xmodule.modulestore import search
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from xmodule.modulestore.exceptions import NoPathToItem
from django.http import HttpResponse, Http404, HttpResponseRedirect
from django.utils.translation import ugettext as _
from open_ended_grading.utils import (
STAFF_ERROR_MESSAGE, StudentProblemList, generate_problem_url, create_controller_query_service
)
log = logging.getLogger(__name__)
def _reverse_with_slash(url_name, course_key):
"""
Reverses the URL given the name and the course id, and then adds a trailing slash if
it does not exist yet.
@param url_name: The name of the url (eg 'staff_grading').
@param course_id: The id of the course object (eg course.id).
@returns: The reversed url with a trailing slash.
"""
ajax_url = _reverse_without_slash(url_name, course_key)
if not ajax_url.endswith('/'):
ajax_url += '/'
return ajax_url
def _reverse_without_slash(url_name, course_key):
course_id = course_key.to_deprecated_string()
ajax_url = reverse(url_name, kwargs={'course_id': course_id})
return ajax_url
DESCRIPTION_DICT = {
'Peer Grading': _("View all problems that require peer assessment in this particular course."),
'Staff Grading': _("View ungraded submissions submitted by students for the open ended problems in the course."),
'Problems you have submitted': _("View open ended problems that you have previously submitted for grading."),
'Flagged Submissions': _("View submissions that have been flagged by students as inappropriate."),
}
ALERT_DICT = {
'Peer Grading': _("New submissions to grade"),
'Staff Grading': _("New submissions to grade"),
'Problems you have submitted': _("New grades have been returned"),
'Flagged Submissions': _("Submissions have been flagged for review"),
}
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
def staff_grading(request, course_id):
"""
Show the instructor grading interface.
"""
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
course = get_course_with_access(request.user, 'staff', course_key)
ajax_url = _reverse_with_slash('staff_grading', course_key)
return render_to_response('instructor/staff_grading.html', {
'course': course,
'course_id': course_id,
'ajax_url': ajax_url,
# Checked above
'staff_access': True, })
def find_peer_grading_module(course):
"""
Given a course, finds the first peer grading module in it.
@param course: A course object.
@return: boolean found_module, string problem_url
"""
# Reverse the base course url.
base_course_url = reverse('courses')
found_module = False
problem_url = ""
# Get the peer grading modules currently in the course. Explicitly specify the course id to avoid issues with different runs.
items = modulestore().get_items(course.id, qualifiers={'category': 'peergrading'})
# See if any of the modules are centralized modules (ie display info from multiple problems)
items = [i for i in items if not getattr(i, "use_for_single_location", True)]
# Loop through all potential peer grading modules, and find the first one that has a path to it.
for item in items:
# Generate a url for the first module and redirect the user to it.
try:
problem_url_parts = search.path_to_location(modulestore(), item.location)
except NoPathToItem:
# In the case of nopathtoitem, the peer grading module that was found is in an invalid state, and
# can no longer be accessed. Log an informational message, but this will not impact normal behavior.
log.info(u"Invalid peer grading module location %s in course %s. This module may need to be removed.", item.location, course.id)
continue
problem_url = generate_problem_url(problem_url_parts, base_course_url)
found_module = True
return found_module, problem_url
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
def peer_grading(request, course_id):
'''
When a student clicks on the "peer grading" button in the open ended interface, link them to a peer grading
xmodule in the course.
'''
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
#Get the current course
course = get_course_with_access(request.user, 'load', course_key)
found_module, problem_url = find_peer_grading_module(course)
if not found_module:
error_message = _("""
Error with initializing peer grading.
There has not been a peer grading module created in the courseware that would allow you to grade others.
Please check back later for this.
""")
log.exception(error_message + u"Current course is: {0}".format(course_id))
return HttpResponse(error_message)
return HttpResponseRedirect(problem_url)
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
def student_problem_list(request, course_id):
"""
Show a list of problems they have attempted to a student.
Fetch the list from the grading controller server and append some data.
@param request: The request object for this view.
@param course_id: The id of the course to get the problem list for.
@return: Renders an HTML problem list table.
"""
assert isinstance(course_id, basestring)
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
# Load the course. Don't catch any errors here, as we want them to be loud.
course = get_course_with_access(request.user, 'load', course_key)
# The anonymous student id is needed for communication with ORA.
student_id = unique_id_for_user(request.user)
base_course_url = reverse('courses')
error_text = ""
student_problem_list = StudentProblemList(course_key, student_id)
# Get the problem list from ORA.
success = student_problem_list.fetch_from_grading_service()
# If we fetched the problem list properly, add in additional problem data.
if success:
# Add in links to problems.
valid_problems = student_problem_list.add_problem_data(base_course_url)
else:
# Get an error message to show to the student.
valid_problems = []
error_text = student_problem_list.error_text
ajax_url = _reverse_with_slash('open_ended_problems', course_key)
context = {
'course': course,
'course_id': course_key.to_deprecated_string(),
'ajax_url': ajax_url,
'success': success,
'problem_list': valid_problems,
'error_text': error_text,
# Checked above
'staff_access': False,
}
return render_to_response('open_ended_problems/open_ended_problems.html', context)
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
def flagged_problem_list(request, course_id):
'''
Show a student problem list
'''
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
course = get_course_with_access(request.user, 'staff', course_key)
# call problem list service
success = False
error_text = ""
problem_list = []
# Make a service that can query edX ORA.
controller_qs = create_controller_query_service()
try:
problem_list_dict = controller_qs.get_flagged_problem_list(course_key)
success = problem_list_dict['success']
if 'error' in problem_list_dict:
error_text = problem_list_dict['error']
problem_list = []
else:
problem_list = problem_list_dict['flagged_submissions']
except GradingServiceError:
#This is a staff_facing_error
error_text = STAFF_ERROR_MESSAGE
#This is a dev_facing_error
log.error("Could not get flagged problem list from external grading service for open ended.")
success = False
# catch error if if the json loads fails
except ValueError:
#This is a staff_facing_error
error_text = STAFF_ERROR_MESSAGE
#This is a dev_facing_error
log.error("Could not parse problem list from external grading service response.")
success = False
ajax_url = _reverse_with_slash('open_ended_flagged_problems', course_key)
context = {
'course': course,
'course_id': course_id,
'ajax_url': ajax_url,
'success': success,
'problem_list': problem_list,
'error_text': error_text,
# Checked above
'staff_access': True,
}
return render_to_response('open_ended_problems/open_ended_flagged_problems.html', context)
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
def combined_notifications(request, course_id):
"""
Gets combined notifications from the grading controller and displays them
"""
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
course = get_course_with_access(request.user, 'load', course_key)
user = request.user
notifications = open_ended_notifications.combined_notifications(course, user)
response = notifications['response']
notification_tuples = open_ended_notifications.NOTIFICATION_TYPES
notification_list = []
for response_num in xrange(0, len(notification_tuples)):
tag = notification_tuples[response_num][0]
if tag in response:
url_name = notification_tuples[response_num][1]
human_name = notification_tuples[response_num][2]
url = _reverse_without_slash(url_name, course_key)
has_img = response[tag]
# check to make sure we have descriptions and alert messages
if human_name in DESCRIPTION_DICT:
description = DESCRIPTION_DICT[human_name]
else:
description = ""
if human_name in ALERT_DICT:
alert_message = ALERT_DICT[human_name]
else:
alert_message = ""
notification_item = {
'url': url,
'name': human_name,
'alert': has_img,
'description': description,
'alert_message': alert_message
}
#The open ended panel will need to link the "peer grading" button in the panel to a peer grading
#xmodule defined in the course. This checks to see if the human name of the server notification
#that we are currently processing is "peer grading". If it is, it looks for a peer grading
#module in the course. If none exists, it removes the peer grading item from the panel.
if human_name == "Peer Grading":
found_module, problem_url = find_peer_grading_module(course)
if found_module:
notification_list.append(notification_item)
else:
notification_list.append(notification_item)
ajax_url = _reverse_with_slash('open_ended_notifications', course_key)
combined_dict = {
'error_text': "",
'notification_list': notification_list,
'course': course,
'success': True,
'ajax_url': ajax_url,
}
return render_to_response('open_ended_problems/combined_notifications.html', combined_dict)
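# Hedged illustration, not part of edx-platform: the filtering rule the
# loop above applies, reduced to plain data. A "Peer Grading" entry is
# kept only when the course actually contains a peer grading module.
def _filter_notification_items(items, course_has_peer_grading):
    kept = []
    for item in items:
        if item['name'] == 'Peer Grading' and not course_has_peer_grading:
            continue  # nothing to link to, so drop the panel entry
        kept.append(item)
    return kept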
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
def take_action_on_flags(request, course_id):
"""
Takes action on student flagged submissions.
Currently, only support unflag and ban actions.
"""
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
if request.method != 'POST':
raise Http404
required = ['submission_id', 'action_type', 'student_id']
for key in required:
if key not in request.POST:
error_message = u'Missing key {0} from submission. Please reload and try again.'.format(key)
response = {
'success': False,
'error': STAFF_ERROR_MESSAGE + error_message
}
return HttpResponse(json.dumps(response), mimetype="application/json")
p = request.POST
submission_id = p['submission_id']
action_type = p['action_type']
student_id = p['student_id']
student_id = student_id.strip(' \t\n\r')
submission_id = submission_id.strip(' \t\n\r')
action_type = action_type.lower().strip(' \t\n\r')
# Make a service that can query edX ORA.
controller_qs = create_controller_query_service()
try:
response = controller_qs.take_action_on_flags(course_key, student_id, submission_id, action_type)
return HttpResponse(json.dumps(response), mimetype="application/json")
except GradingServiceError:
log.exception(
u"Error taking action on flagged peer grading submissions, "
u"submission_id: {0}, action_type: {1}, grader_id: {2}".format(
submission_id, action_type, student_id)
)
response = {
'success': False,
'error': STAFF_ERROR_MESSAGE
}
return HttpResponse(json.dumps(response), mimetype="application/json")
| agpl-3.0 |
Donkyhotay/MoonPy | zope/component/globalregistry.py | 1 | 6970 | ##############################################################################
#
# Copyright (c) 2006 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Global components support
$Id: globalregistry.py 69983 2006-09-05 21:41:15Z ctheune $
"""
import types
from zope.interface import implements
from zope.interface.adapter import AdapterRegistry
from zope.deprecation.deprecation import deprecate, deprecated
from zope.component.registry import Components
from zope.component.interfaces import Invalid, IComponentLookup, IRegistry
from zope.component.interfaces import ComponentLookupError
from zope.interface.interfaces import ISpecification
def GAR(components, registryName):
return getattr(components, registryName)
class GlobalAdapterRegistry(AdapterRegistry):
"""A global adapter registry
This adapter registry's main purpose is to be picklable in combination
with a site manager."""
def __init__(self, parent, name):
self.__parent__ = parent
self.__name__ = name
super(GlobalAdapterRegistry, self).__init__()
def __reduce__(self):
return GAR, (self.__parent__, self.__name__)
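# Hedged note, illustration only: pickling an instance of the class above
# therefore stores the callable GAR together with (parent, name); on load,
# GAR(parent, name) re-fetches the registry from its parent rather than
# serializing the registry's contents.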
########################################################################
#
# BBB 2006/02/28 -- to be removed after 12 months
class _IGlobalSiteManager(IComponentLookup, IRegistry):
def provideAdapter(required, provided, name, factory, info=''):
"""Register an adapter factory
:Parameters:
- `required`: a sequence of specifications for objects to be
adapted.
- `provided`: The interface provided by the adapter
- `name`: The adapter name
- `factory`: The object used to compute the adapter
- `info`: Provide some info about this particular adapter.
"""
def subscribe(required, provided, factory, info=''):
"""Register a subscriber factory
:Parameters:
- `required`: a sequence of specifications for objects to be
adapted.
- `provided`: The interface provided by the adapter
- `name`: The adapter name
- `factory`: The object used to compute the subscriber
- `info`: Provide some info about this particular adapter.
"""
def provideUtility(providedInterface, component, name='', info='',
strict=True):
"""Register a utility
If strict is true, then the specified component *must* implement the
`providedInterface`. Turning strict off is particularly useful for
tests."""
#
########################################################################
class BaseGlobalComponents(Components):
implements(_IGlobalSiteManager)
def _init_registries(self):
self.adapters = GlobalAdapterRegistry(self, 'adapters')
self.utilities = GlobalAdapterRegistry(self, 'utilities')
def __reduce__(self):
# Global site managers are pickled as global objects
return self.__name__
####################################################################
#
# BBB 2006/02/28 -- to be removed after 12 months
@deprecate("The provideAdapter method of the global site manager has been "
"deprecated. Use registerAdapter instead.")
def provideAdapter(self, required, provided, name, factory, info=''):
self.registerAdapter(factory, required, provided, name, info)
@deprecate("The subscribe method of the global site manager has been "
"deprecated. Use registerHandler or registerSubscriptionAdapter "
"instead.")
def subscribe(self, required, provided, factory, info=''):
if provided is None:
self.registerHandler(factory, required, u'', info)
else:
self.registerSubscriptionAdapter(factory, required, provided,
info=info)
@deprecate("The provideUtility method of the global site manager has been "
"deprecated. Use registerUtility instead.")
def provideUtility(self, providedInterface, component, name='', info='',
strict=True):
if strict and not providedInterface.providedBy(component):
raise Invalid("The registered component doesn't provide "
"the promised interface.")
self.registerUtility(component, providedInterface, name, info)
@deprecate("The registrations method of the global site manager has been "
"deprecate. Use either registeredAdapters, registeredUtilities, "
"or registeredSubscriptionAdapters instead.")
def registrations(self):
for reg in self.registeredAdapters():
yield reg
for reg in self.registeredSubscriptionAdapters():
yield reg
for reg in self.registeredHandlers():
yield reg
for reg in self.registeredUtilities():
yield reg
#
####################################################################
base = BaseGlobalComponents('base')
from zope.testing.cleanup import addCleanUp
addCleanUp(lambda: base.__init__('base'))
del addCleanUp
globalSiteManager = base
def getGlobalSiteManager():
return globalSiteManager
# The following APIs provide global registration support for Python code.
# We eventually want to deprecate these in favor of using the global
# component registry directly.
def provideUtility(component, provides=None, name=u''):
base.registerUtility(component, provides, name, event=False)
def provideAdapter(factory, adapts=None, provides=None, name=''):
base.registerAdapter(factory, adapts, provides, name, event=False)
def provideSubscriptionAdapter(factory, adapts=None, provides=None):
base.registerSubscriptionAdapter(factory, adapts, provides, event=False)
def provideHandler(factory, adapts=None):
base.registerHandler(factory, adapts, event=False)
import zope.component._api # see http://www.zope.org/Collectors/Zope3-dev/674
# Ideally, we will switch to an explicit adapter hook registration. For now,
# if you provide an adapter, we want to make sure that the adapter hook is
# registered, and that registration depends on code in _api, which itself
# depends on code in this module. So, for now, we do another of these nasty
# circular import workarounds. See also standalonetests.py, as run by
# tests.py in StandaloneTests, for a test that fails without this hack, and
# succeeds with it.
| gpl-3.0 |
sametmax/Django--an-app-at-a-time | ignore_this_directory/django/contrib/messages/storage/cookie.py | 47 | 6532 | import json
from django.conf import settings
from django.contrib.messages.storage.base import BaseStorage, Message
from django.http import SimpleCookie
from django.utils.crypto import constant_time_compare, salted_hmac
from django.utils.safestring import SafeData, mark_safe
class MessageEncoder(json.JSONEncoder):
"""
Compactly serialize instances of the ``Message`` class as JSON.
"""
message_key = '__json_message'
def default(self, obj):
if isinstance(obj, Message):
# Using 0/1 here instead of False/True to produce more compact json
is_safedata = 1 if isinstance(obj.message, SafeData) else 0
message = [self.message_key, is_safedata, obj.level, obj.message]
if obj.extra_tags:
message.append(obj.extra_tags)
return message
return super().default(obj)
class MessageDecoder(json.JSONDecoder):
"""
Decode JSON that includes serialized ``Message`` instances.
"""
def process_messages(self, obj):
if isinstance(obj, list) and obj:
if obj[0] == MessageEncoder.message_key:
if len(obj) == 3:
# Compatibility with previously-encoded messages
return Message(*obj[1:])
if obj[1]:
obj[3] = mark_safe(obj[3])
return Message(*obj[2:])
return [self.process_messages(item) for item in obj]
if isinstance(obj, dict):
return {key: self.process_messages(value)
for key, value in obj.items()}
return obj
def decode(self, s, **kwargs):
decoded = super().decode(s, **kwargs)
return self.process_messages(decoded)
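# Hedged illustration, comment only: the compact list layout round-tripped
# by the two classes above --
#   ["__json_message", <is_safedata 0/1>, <level>, <message>, <extra_tags?>]
# e.g. '["__json_message", 0, 20, "Saved."]' decodes back to a
# Message(level=20, message="Saved.") instance.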
class CookieStorage(BaseStorage):
"""
Store messages in a cookie.
"""
cookie_name = 'messages'
# uwsgi's default configuration enforces a maximum size of 4kb for all the
# HTTP headers. In order to leave some room for other cookies and headers,
# restrict the session cookie to 1/2 of 4kb. See #18781.
max_cookie_size = 2048
not_finished = '__messagesnotfinished__'
def _get(self, *args, **kwargs):
"""
Retrieve a list of messages from the messages cookie. If the
not_finished sentinel value is found at the end of the message list,
remove it and return a result indicating that not all messages were
retrieved by this storage.
"""
data = self.request.COOKIES.get(self.cookie_name)
messages = self._decode(data)
all_retrieved = not (messages and messages[-1] == self.not_finished)
if messages and not all_retrieved:
# remove the sentinel value
messages.pop()
return messages, all_retrieved
def _update_cookie(self, encoded_data, response):
"""
Either set the cookie with the encoded data if there is any data to
store, or delete the cookie.
"""
if encoded_data:
response.set_cookie(
self.cookie_name, encoded_data,
domain=settings.SESSION_COOKIE_DOMAIN,
secure=settings.SESSION_COOKIE_SECURE or None,
httponly=settings.SESSION_COOKIE_HTTPONLY or None,
samesite=settings.SESSION_COOKIE_SAMESITE,
)
else:
response.delete_cookie(self.cookie_name, domain=settings.SESSION_COOKIE_DOMAIN)
def _store(self, messages, response, remove_oldest=True, *args, **kwargs):
"""
Store the messages to a cookie and return a list of any messages which
could not be stored.
If the encoded data is larger than ``max_cookie_size``, remove
messages until the data fits (these are the messages which are
returned), and add the not_finished sentinel value to indicate as much.
"""
unstored_messages = []
encoded_data = self._encode(messages)
if self.max_cookie_size:
# data is going to be stored eventually by SimpleCookie, which
# adds its own overhead, which we must account for.
cookie = SimpleCookie() # create outside the loop
def stored_length(val):
return len(cookie.value_encode(val)[1])
while encoded_data and stored_length(encoded_data) > self.max_cookie_size:
if remove_oldest:
unstored_messages.append(messages.pop(0))
else:
unstored_messages.insert(0, messages.pop())
encoded_data = self._encode(messages + [self.not_finished],
encode_empty=unstored_messages)
self._update_cookie(encoded_data, response)
return unstored_messages
def _hash(self, value):
"""
Create an HMAC/SHA1 hash based on the value and the project setting's
SECRET_KEY, modified to make it unique for the present purpose.
"""
key_salt = 'django.contrib.messages'
return salted_hmac(key_salt, value).hexdigest()
def _encode(self, messages, encode_empty=False):
"""
Return an encoded version of the messages list which can be stored as
plain text.
Since the data will be retrieved from the client-side, the encoded data
also contains a hash to ensure that the data was not tampered with.
"""
if messages or encode_empty:
encoder = MessageEncoder(separators=(',', ':'))
value = encoder.encode(messages)
return '%s$%s' % (self._hash(value), value)
def _decode(self, data):
"""
Safely decode an encoded text stream back into a list of messages.
If the encoded text stream contained an invalid hash or was in an
invalid format, return None.
"""
if not data:
return None
bits = data.split('$', 1)
if len(bits) == 2:
hash, value = bits
if constant_time_compare(hash, self._hash(value)):
try:
# If we get here (and the JSON decode works), everything is
# good. In any other case, drop back and return None.
return json.loads(value, cls=MessageDecoder)
except json.JSONDecodeError:
pass
# Mark the data as used (so it gets removed) since something was wrong
# with the data.
self.used = True
return None
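# Hedged sketch, illustration only: the "hash$value" envelope produced by
# _encode and checked by _decode, with plain hashlib standing in for
# Django's salted_hmac (the real hash also mixes in SECRET_KEY).
def _envelope_roundtrip_example():
    import hashlib
    def demo_hash(value):
        return hashlib.sha1(('demo-salt' + value).encode('utf-8')).hexdigest()
    value = '[["__json_message", 0, 25, "Saved."]]'
    encoded = '%s$%s' % (demo_hash(value), value)
    hash_part, payload = encoded.split('$', 1)
    assert hash_part == demo_hash(payload)  # tampering would break this
    return payload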
| mit |
Fewbytes/cosmo-manager-rest-client | cosmo_manager_rest_client/swagger/BlueprintsApi.py | 1 | 4585 | ########
# Copyright (c) 2013 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import requests
class BlueprintsApi(object):
def __init__(self, api_client):
self.api_client = api_client
def upload(self, tar_file_obj,
application_file_name=None,
blueprint_id=None):
"""Upload a new blueprint to Cloudify
Args:
            tar_file_obj: File object of the tar-gzipped
                blueprint directory (required)
            application_file_name: File name of the yaml containing
                the main blueprint (optional)
            blueprint_id: Uploaded blueprint id (optional; the plan name is
                used if not provided)
Returns: BlueprintState
"""
query_params = {}
if application_file_name is not None:
query_params['application_file_name'] = \
self.api_client.toPathValue(application_file_name)
def file_gen():
buffer_size = 8192
while True:
read_bytes = tar_file_obj.read(buffer_size)
yield read_bytes
if len(read_bytes) < buffer_size:
return
if blueprint_id is not None:
resource_path = '/blueprints/{0}'.format(blueprint_id)
url = self.api_client.resource_url(resource_path)
response = requests.put(url,
params=query_params,
data=file_gen())
else:
resource_path = '/blueprints'
url = self.api_client.resource_url(resource_path)
response = requests.post(url,
params=query_params,
data=file_gen())
self.api_client.raise_if_not(201, response, url)
return self.api_client.deserialize(response.json(),
'BlueprintState')
def list(self):
"""Returns a list a submitted blueprints.
Args:
Returns: list[BlueprintState]
"""
resource_path = '/blueprints'
url = self.api_client.resource_url(resource_path)
response = requests.get(url)
self.api_client.raise_if_not(200, response, url)
return self.api_client.deserialize(response.json(),
'list[BlueprintState]')
def getById(self, blueprint_id):
"""Returns a blueprint by its id.
Args:
            blueprint_id: id of the blueprint to fetch (required)
Returns: BlueprintState
"""
resource_path = '/blueprints/{0}'.format(blueprint_id)
url = self.api_client.resource_url(resource_path)
response = requests.get(url)
self.api_client.raise_if_not(200, response, url)
return self.api_client.deserialize(response.json(),
'BlueprintState')
def validate(self, blueprint_id):
"""Validates a given blueprint.
Args:
            blueprint_id: id of the blueprint to validate (required)
Returns: BlueprintValidationStatus
"""
resource_path = '/blueprints/{0}/validate'.format(blueprint_id)
url = self.api_client.resource_url(resource_path)
response = requests.get(url)
self.api_client.raise_if_not(200, response, url)
return self.api_client.deserialize(response.json(),
'BlueprintValidationStatus')
def delete(self, blueprint_id):
"""Deletes a given blueprint.
Args:
blueprint_id: str
Returns: BlueprintState
"""
resource_path = '/blueprints/{0}'.format(blueprint_id)
url = self.api_client.resource_url(resource_path)
response = requests.delete(url)
self.api_client.raise_if_not(201, response, url)
return self.api_client.deserialize(response.json(),
'BlueprintState')
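# Hedged sketch, not part of the client: the chunked-read generator that
# upload() builds inline, shown standalone. Yielding fixed-size reads lets
# requests stream the request body instead of holding the whole tarball in
# memory; the loop ends on the first short read.
def _chunked_reader(file_obj, buffer_size=8192):
    while True:
        chunk = file_obj.read(buffer_size)
        yield chunk
        if len(chunk) < buffer_size:
            return
# Assumed usage: requests.put(url, data=_chunked_reader(open('b.tar.gz', 'rb')))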
| apache-2.0 |
ondras/TeaJS | deps/v8/tools/testrunner/objects/peer.py | 123 | 3561 | # Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
class Peer(object):
def __init__(self, address, jobs, rel_perf, pubkey):
self.address = address # string: IP address
self.jobs = jobs # integer: number of CPUs
self.relative_performance = rel_perf
self.pubkey = pubkey # string: pubkey's fingerprint
self.shells = set() # set of strings
self.needed_work = 0
self.assigned_work = 0
self.tests = [] # list of TestCase objects
self.trusting_me = False # This peer trusts my public key.
self.trusted = False # I trust this peer's public key.
def __str__(self):
return ("Peer at %s, jobs: %d, performance: %.2f, trust I/O: %s/%s" %
(self.address, self.jobs, self.relative_performance,
self.trusting_me, self.trusted))
def AddTests(self, shell):
"""Adds tests from |shell| to this peer.
Stops when self.needed_work reaches zero, or when all of shell's tests
are assigned."""
assert self.needed_work > 0
if shell.shell not in self.shells:
self.shells.add(shell.shell)
while len(shell.tests) > 0 and self.needed_work > 0:
t = shell.tests.pop()
self.needed_work -= t.duration
self.assigned_work += t.duration
shell.total_duration -= t.duration
self.tests.append(t)
def ForceAddOneTest(self, test, shell):
"""Forcibly adds another test to this peer, disregarding needed_work."""
if shell.shell not in self.shells:
self.shells.add(shell.shell)
self.needed_work -= test.duration
self.assigned_work += test.duration
shell.total_duration -= test.duration
self.tests.append(test)
def Pack(self):
"""Creates a JSON serializable representation of this Peer."""
return [self.address, self.jobs, self.relative_performance]
@staticmethod
def Unpack(packed):
"""Creates a Peer object built from a packed representation."""
pubkey_dummy = "" # Callers of this don't care (only the server does).
return Peer(packed[0], packed[1], packed[2], pubkey_dummy)
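# Hedged example, illustration only: a Pack()/Unpack() round trip. The
# public key is deliberately dropped on the wire, so the clone carries a
# placeholder fingerprint.
def _pack_roundtrip_example():
    peer = Peer("192.168.0.5", jobs=8, rel_perf=1.25, pubkey="ab:cd")
    packed = peer.Pack()  # ["192.168.0.5", 8, 1.25]
    clone = Peer.Unpack(packed)
    assert (clone.address, clone.jobs) == (peer.address, peer.jobs)
    return clone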
| bsd-3-clause |
hackers-terabit/portage | pym/portage/_legacy_globals.py | 4 | 2289 | # Copyright 2010 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import portage
from portage import os
from portage.const import CACHE_PATH, PROFILE_PATH
def _get_legacy_global(name):
constructed = portage._legacy_globals_constructed
if name in constructed:
return getattr(portage, name)
if name == 'portdb':
portage.portdb = portage.db[portage.root]["porttree"].dbapi
constructed.add(name)
return getattr(portage, name)
elif name in ('mtimedb', 'mtimedbfile'):
portage.mtimedbfile = os.path.join(portage.settings['EROOT'],
CACHE_PATH, "mtimedb")
constructed.add('mtimedbfile')
portage.mtimedb = portage.MtimeDB(portage.mtimedbfile)
constructed.add('mtimedb')
return getattr(portage, name)
# Portage needs to ensure a sane umask for the files it creates.
os.umask(0o22)
kwargs = {}
for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"),
("target_root", "ROOT"), ("eprefix", "EPREFIX")):
kwargs[k] = os.environ.get(envvar)
portage._initializing_globals = True
portage.db = portage.create_trees(**kwargs)
constructed.add('db')
del portage._initializing_globals
settings = portage.db[portage.db._target_eroot]["vartree"].settings
portage.settings = settings
constructed.add('settings')
# Since portage.db now uses EROOT for keys instead of ROOT, we make
# portage.root refer to EROOT such that it continues to work as a key.
portage.root = portage.db._target_eroot
constructed.add('root')
# COMPATIBILITY
# These attributes should not be used within
# Portage under any circumstances.
portage.archlist = settings.archlist()
constructed.add('archlist')
portage.features = settings.features
constructed.add('features')
portage.groups = settings.get("ACCEPT_KEYWORDS", "").split()
constructed.add('groups')
portage.pkglines = settings.packages
constructed.add('pkglines')
portage.selinux_enabled = settings.selinux_enabled()
constructed.add('selinux_enabled')
portage.thirdpartymirrors = settings.thirdpartymirrors()
constructed.add('thirdpartymirrors')
profiledir = os.path.join(settings["PORTAGE_CONFIGROOT"], PROFILE_PATH)
if not os.path.isdir(profiledir):
profiledir = None
portage.profiledir = profiledir
constructed.add('profiledir')
return getattr(portage, name)
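# Hedged sketch, illustration only: the "construct on first access, then
# reuse" pattern _get_legacy_global follows, reduced to a generic helper.
_demo_constructed = set()
def _lazy_attr(module, name, factory):
    # Build the attribute once, record it, and serve the cached value on
    # every later lookup.
    if name not in _demo_constructed:
        setattr(module, name, factory())
        _demo_constructed.add(name)
    return getattr(module, name)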
| gpl-2.0 |
qtekfun/htcDesire820Kernel | external/chromium_org/media/tools/layout_tests/trend_graph_unittest.py | 175 | 1288 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import shutil
import unittest
from trend_graph import TrendGraph
class TestTrendGraph(unittest.TestCase):
def testUpdate(self):
test_graph_file_backup_path = os.path.join('test_data', 'graph.html.bak')
test_graph_file_path = os.path.join('test_data', 'graph.html')
shutil.copyfile(test_graph_file_backup_path, test_graph_file_path)
trend_graph = TrendGraph(test_graph_file_path)
data_map = {}
data_map['whole'] = (['test1'], 'undefined', 'undefined')
data_map['skip'] = (['test1', 'test2'], 'undefined', 'undefined')
data_map['nonskip'] = (['test1', 'test2', 'test3'], 'undefined',
'undefined')
data_map['passingrate'] = (str(4), 'undefined', 'undefined')
trend_graph.Update('2008,1,1,13,45,00', data_map)
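    # Note (assumed from the assertion below): the graph stores JavaScript
    # Date arguments, whose months are 0-based, so the January timestamp
    # passed above is expected to appear as '2008,0,1,13,45,00'.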
# Assert the result graph from the file.
f = open(test_graph_file_path)
lines2 = f.readlines()
f.close()
line_count = 0
for line in lines2:
if '2008,0,1,13,45,00' in line:
line_count += 1
self.assertEqual(line_count, 2)
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
YYWen0o0/python-frame-django | tests/utils_tests/test_text.py | 25 | 8128 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from unittest import skipUnless
import warnings
from django.test import SimpleTestCase
from django.utils import six, text
from django.utils.deprecation import RemovedInDjango19Warning
IS_WIDE_BUILD = (len('\U0001F4A9') == 1)
class TestUtilsText(SimpleTestCase):
def test_truncate_chars(self):
truncator = text.Truncator(
'The quick brown fox jumped over the lazy dog.'
)
        self.assertEqual('The quick brown fox jumped over the lazy dog.',
                         truncator.chars(100))
        self.assertEqual('The quick brown fox ...',
                         truncator.chars(23))
        self.assertEqual('The quick brown fo.....',
                         truncator.chars(23, '.....'))
# Ensure that we normalize our unicode data first
nfc = text.Truncator('o\xfco\xfco\xfco\xfc')
nfd = text.Truncator('ou\u0308ou\u0308ou\u0308ou\u0308')
self.assertEqual('oüoüoüoü', nfc.chars(8))
self.assertEqual('oüoüoüoü', nfd.chars(8))
self.assertEqual('oü...', nfc.chars(5))
self.assertEqual('oü...', nfd.chars(5))
# Ensure the final length is calculated correctly when there are
# combining characters with no precomposed form, and that combining
# characters are not split up.
truncator = text.Truncator('-B\u030AB\u030A----8')
self.assertEqual('-B\u030A...', truncator.chars(5))
self.assertEqual('-B\u030AB\u030A-...', truncator.chars(7))
self.assertEqual('-B\u030AB\u030A----8', truncator.chars(8))
# Ensure the length of the end text is correctly calculated when it
# contains combining characters with no precomposed form.
truncator = text.Truncator('-----')
self.assertEqual('---B\u030A', truncator.chars(4, 'B\u030A'))
self.assertEqual('-----', truncator.chars(5, 'B\u030A'))
# Make a best effort to shorten to the desired length, but requesting
# a length shorter than the ellipsis shouldn't break
self.assertEqual('...', text.Truncator('asdf').chars(1))
def test_truncate_words(self):
truncator = text.Truncator('The quick brown fox jumped over the lazy '
'dog.')
self.assertEqual('The quick brown fox jumped over the lazy dog.',
truncator.words(10))
self.assertEqual('The quick brown fox...', truncator.words(4))
self.assertEqual('The quick brown fox[snip]',
truncator.words(4, '[snip]'))
def test_truncate_html_words(self):
truncator = text.Truncator('<p id="par"><strong><em>The quick brown fox'
' jumped over the lazy dog.</em></strong></p>')
self.assertEqual('<p id="par"><strong><em>The quick brown fox jumped over'
' the lazy dog.</em></strong></p>', truncator.words(10, html=True))
self.assertEqual('<p id="par"><strong><em>The quick brown fox...</em>'
'</strong></p>', truncator.words(4, html=True))
self.assertEqual('<p id="par"><strong><em>The quick brown fox....</em>'
'</strong></p>', truncator.words(4, '....', html=True))
self.assertEqual('<p id="par"><strong><em>The quick brown fox</em>'
'</strong></p>', truncator.words(4, '', html=True))
# Test with new line inside tag
truncator = text.Truncator('<p>The quick <a href="xyz.html"\n'
'id="mylink">brown fox</a> jumped over the lazy dog.</p>')
self.assertEqual('<p>The quick <a href="xyz.html"\n'
'id="mylink">brown...</a></p>', truncator.words(3, '...', html=True))
# Test self-closing tags
truncator = text.Truncator('<br/>The <hr />quick brown fox jumped over'
' the lazy dog.')
self.assertEqual('<br/>The <hr />quick brown...',
truncator.words(3, '...', html=True))
truncator = text.Truncator('<br>The <hr/>quick <em>brown fox</em> '
'jumped over the lazy dog.')
self.assertEqual('<br>The <hr/>quick <em>brown...</em>',
truncator.words(3, '...', html=True))
# Test html entities
        truncator = text.Truncator('<i>Buenos d&iacute;as!'
                                   ' &iquest;C&oacute;mo est&aacute;?</i>')
        self.assertEqual('<i>Buenos d&iacute;as! &iquest;C&oacute;mo...</i>',
                         truncator.words(3, '...', html=True))
        truncator = text.Truncator('<p>I &lt;3 python, what about you?</p>')
        self.assertEqual('<p>I &lt;3 python...</p>',
                         truncator.words(3, '...', html=True))
def test_wrap(self):
digits = '1234 67 9'
self.assertEqual(text.wrap(digits, 100), '1234 67 9')
self.assertEqual(text.wrap(digits, 9), '1234 67 9')
self.assertEqual(text.wrap(digits, 8), '1234 67\n9')
self.assertEqual(text.wrap('short\na long line', 7),
'short\na long\nline')
self.assertEqual(text.wrap('do-not-break-long-words please? ok', 8),
'do-not-break-long-words\nplease?\nok')
long_word = 'l%sng' % ('o' * 20)
self.assertEqual(text.wrap(long_word, 20), long_word)
self.assertEqual(text.wrap('a %s word' % long_word, 10),
'a\n%s\nword' % long_word)
def test_normalize_newlines(self):
self.assertEqual(text.normalize_newlines("abc\ndef\rghi\r\n"),
"abc\ndef\nghi\n")
self.assertEqual(text.normalize_newlines("\n\r\r\n\r"), "\n\n\n\n")
self.assertEqual(text.normalize_newlines("abcdefghi"), "abcdefghi")
self.assertEqual(text.normalize_newlines(""), "")
def test_normalize_newlines_bytes(self):
"""normalize_newlines should be able to handle bytes too"""
normalized = text.normalize_newlines(b"abc\ndef\rghi\r\n")
self.assertEqual(normalized, "abc\ndef\nghi\n")
self.assertIsInstance(normalized, six.text_type)
def test_slugify(self):
items = (
('Hello, World!', 'hello-world'),
('spam & eggs', 'spam-eggs'),
)
for value, output in items:
self.assertEqual(text.slugify(value), output)
def test_unescape_entities(self):
items = [
('', ''),
('foo', 'foo'),
('&', '&'),
            ('&amp;', '&'),
            ('&#38;', '&'),
            ('&#x26;', '&'),
            ('foo &amp; bar', 'foo & bar'),
]
for value, output in items:
self.assertEqual(text.unescape_entities(value), output)
def test_get_valid_filename(self):
filename = "^&'@{}[],$=!-#()%+~_123.txt"
self.assertEqual(text.get_valid_filename(filename), "-_123.txt")
def test_javascript_quote(self):
input = "<script>alert('Hello \\xff.\n Welcome\there\r');</script>"
output = r"<script>alert(\'Hello \\xff.\n Welcome\there\r\');<\/script>"
with warnings.catch_warnings():
warnings.simplefilter("ignore", RemovedInDjango19Warning)
self.assertEqual(text.javascript_quote(input), output)
# Exercising quote_double_quotes keyword argument
input = '"Text"'
self.assertEqual(text.javascript_quote(input), '"Text"')
        self.assertEqual(text.javascript_quote(input, quote_double_quotes=True),
                         '&quot;Text&quot;')
@skipUnless(IS_WIDE_BUILD, 'Not running in a wide build of Python')
def test_javascript_quote_unicode(self):
input = "<script>alert('Hello \\xff.\n Wel𝕃come\there\r');</script>"
output = r"<script>alert(\'Hello \\xff.\n Wel𝕃come\there\r\');<\/script>"
with warnings.catch_warnings():
warnings.simplefilter("ignore", RemovedInDjango19Warning)
self.assertEqual(text.javascript_quote(input), output)
def test_deprecation(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
text.javascript_quote('thingy')
self.assertEqual(len(w), 1)
self.assertIn('escapejs()', repr(w[0].message))
| bsd-3-clause |
simonwydooghe/ansible | lib/ansible/modules/cloud/google/gcp_compute_node_template.py | 13 | 13957 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_compute_node_template
description:
- Represents a NodeTemplate resource. Node templates specify properties for creating
sole-tenant nodes, such as node type, vCPU and memory requirements, node affinity
labels, and region.
short_description: Creates a GCP NodeTemplate
version_added: '2.10'
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
state:
description:
- Whether the given object should exist in GCP
choices:
- present
- absent
default: present
type: str
description:
description:
- An optional textual description of the resource.
required: false
type: str
name:
description:
- Name of the resource.
required: false
type: str
node_affinity_labels:
description:
- Labels to use for node affinity, which will be used in instance scheduling.
required: false
type: dict
node_type:
description:
- Node type to use for nodes group that are created from this template.
- Only one of nodeTypeFlexibility and nodeType can be specified.
required: false
type: str
node_type_flexibility:
description:
- Flexible properties for the desired node type. Node groups that use this node
template will create nodes of a type that matches these properties. Only one
of nodeTypeFlexibility and nodeType can be specified.
required: false
type: dict
suboptions:
cpus:
description:
- Number of virtual CPUs to use.
required: false
type: str
memory:
description:
- Physical memory available to the node, defined in MB.
required: false
type: str
region:
description:
- Region where nodes using the node template will be created .
required: true
type: str
project:
description:
- The Google Cloud Platform project to use.
type: str
auth_kind:
description:
- The type of credential used.
type: str
required: true
choices:
- application
- machineaccount
- serviceaccount
service_account_contents:
description:
- The contents of a Service Account JSON file, either in a dictionary or as a
JSON string that represents it.
type: jsonarg
service_account_file:
description:
- The path of a Service Account JSON file if serviceaccount is selected as type.
type: path
service_account_email:
description:
- An optional service account email address if machineaccount is selected and
the user does not wish to use the default email.
type: str
scopes:
description:
- Array of scopes to be used
type: list
env_type:
description:
- Specifies which Ansible environment you're running this module within.
- This should not be set unless you know what you're doing.
- This only alters the User Agent string for any API requests.
type: str
notes:
- 'API Reference: U(https://cloud.google.com/compute/docs/reference/rest/v1/nodeTemplates)'
- 'Sole-Tenant Nodes: U(https://cloud.google.com/compute/docs/nodes/)'
- For authentication, you can set service_account_file using the C(gcp_service_account_file)
env variable.
- For authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS)
env variable.
- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL)
env variable.
- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable.
- For authentication, you can set scopes using the C(GCP_SCOPES) env variable.
- Environment variables values will only be used if the playbook values are not set.
- The I(service_account_email) and I(service_account_file) options are mutually exclusive.
'''
EXAMPLES = '''
- name: create a node template
gcp_compute_node_template:
name: test_object
region: us-central1
node_type: n1-node-96-624
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
state: present
'''
RETURN = '''
creationTimestamp:
description:
- Creation timestamp in RFC3339 text format.
returned: success
type: str
description:
description:
- An optional textual description of the resource.
returned: success
type: str
name:
description:
- Name of the resource.
returned: success
type: str
nodeAffinityLabels:
description:
- Labels to use for node affinity, which will be used in instance scheduling.
returned: success
type: dict
nodeType:
description:
- Node type to use for nodes group that are created from this template.
- Only one of nodeTypeFlexibility and nodeType can be specified.
returned: success
type: str
nodeTypeFlexibility:
description:
- Flexible properties for the desired node type. Node groups that use this node
template will create nodes of a type that matches these properties. Only one of
nodeTypeFlexibility and nodeType can be specified.
returned: success
type: complex
contains:
cpus:
description:
- Number of virtual CPUs to use.
returned: success
type: str
memory:
description:
- Physical memory available to the node, defined in MB.
returned: success
type: str
localSsd:
description:
- Use local SSD .
returned: success
type: str
region:
description:
- Region where nodes using the node template will be created .
returned: success
type: str
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, remove_nones_from_dict, replace_resource_dict
import json
import re
import time
################################################################################
# Main
################################################################################
def main():
"""Main function"""
module = GcpModule(
argument_spec=dict(
state=dict(default='present', choices=['present', 'absent'], type='str'),
description=dict(type='str'),
name=dict(type='str'),
node_affinity_labels=dict(type='dict'),
node_type=dict(type='str'),
node_type_flexibility=dict(type='dict', options=dict(cpus=dict(type='str'), memory=dict(type='str'))),
region=dict(required=True, type='str'),
),
mutually_exclusive=[['node_type', 'node_type_flexibility']],
)
if not module.params['scopes']:
module.params['scopes'] = ['https://www.googleapis.com/auth/compute']
state = module.params['state']
kind = 'compute#nodeTemplate'
fetch = fetch_resource(module, self_link(module), kind)
changed = False
if fetch:
if state == 'present':
if is_different(module, fetch):
update(module, self_link(module), kind)
fetch = fetch_resource(module, self_link(module), kind)
changed = True
else:
delete(module, self_link(module), kind)
fetch = {}
changed = True
else:
if state == 'present':
fetch = create(module, collection(module), kind)
changed = True
else:
fetch = {}
fetch.update({'changed': changed})
module.exit_json(**fetch)
def create(module, link, kind):
auth = GcpSession(module, 'compute')
return wait_for_operation(module, auth.post(link, resource_to_request(module)))
def update(module, link, kind):
delete(module, self_link(module), kind)
create(module, collection(module), kind)
def delete(module, link, kind):
auth = GcpSession(module, 'compute')
return wait_for_operation(module, auth.delete(link))
def resource_to_request(module):
request = {
u'kind': 'compute#nodeTemplate',
u'description': module.params.get('description'),
u'name': module.params.get('name'),
u'nodeAffinityLabels': module.params.get('node_affinity_labels'),
u'nodeType': module.params.get('node_type'),
u'nodeTypeFlexibility': NodeTemplateNodetypeflexibility(module.params.get('node_type_flexibility', {}), module).to_request(),
}
return_vals = {}
for k, v in request.items():
if v or v is False:
return_vals[k] = v
return return_vals
def fetch_resource(module, link, kind, allow_not_found=True):
auth = GcpSession(module, 'compute')
return return_if_object(module, auth.get(link), kind, allow_not_found)
def self_link(module):
return "https://www.googleapis.com/compute/v1/projects/{project}/regions/{region}/nodeTemplates/{name}".format(**module.params)
def collection(module):
return "https://www.googleapis.com/compute/v1/projects/{project}/regions/{region}/nodeTemplates".format(**module.params)
def return_if_object(module, response, kind, allow_not_found=False):
# If not found, return nothing.
if allow_not_found and response.status_code == 404:
return None
# If no content, return nothing.
if response.status_code == 204:
return None
try:
module.raise_for_status(response)
result = response.json()
except getattr(json.decoder, 'JSONDecodeError', ValueError):
module.fail_json(msg="Invalid JSON response with error: %s" % response.text)
if navigate_hash(result, ['error', 'errors']):
module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
return result
def is_different(module, response):
request = resource_to_request(module)
response = response_to_hash(module, response)
# Remove all output-only from response.
response_vals = {}
for k, v in response.items():
if k in request:
response_vals[k] = v
request_vals = {}
for k, v in request.items():
if k in response:
request_vals[k] = v
return GcpRequest(request_vals) != GcpRequest(response_vals)
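# Hedged illustration, not part of the module: the key-intersection
# comparison is_different() performs, shown on plain dicts. Only keys
# present on both sides are compared, so output-only response fields
# (for example creationTimestamp) never trigger an update by themselves.
def _intersection_differs(request, response):
    shared_request = {k: v for k, v in request.items() if k in response}
    shared_response = {k: v for k, v in response.items() if k in request}
    return shared_request != shared_response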
# Remove unnecessary properties from the response.
# This is for doing comparisons with Ansible's current parameters.
def response_to_hash(module, response):
return {
u'creationTimestamp': response.get(u'creationTimestamp'),
u'description': response.get(u'description'),
u'name': response.get(u'name'),
u'nodeAffinityLabels': response.get(u'nodeAffinityLabels'),
u'nodeType': response.get(u'nodeType'),
u'nodeTypeFlexibility': NodeTemplateNodetypeflexibility(response.get(u'nodeTypeFlexibility', {}), module).from_response(),
}
def region_selflink(name, params):
if name is None:
return
url = r"https://www.googleapis.com/compute/v1/projects/.*/regions/.*"
if not re.match(url, name):
name = "https://www.googleapis.com/compute/v1/projects/{project}/regions/%s".format(**params) % name
return name
def async_op_url(module, extra_data=None):
if extra_data is None:
extra_data = {}
url = "https://www.googleapis.com/compute/v1/projects/{project}/regions/{region}/operations/{op_id}"
combined = extra_data.copy()
combined.update(module.params)
return url.format(**combined)
def wait_for_operation(module, response):
op_result = return_if_object(module, response, 'compute#operation')
if op_result is None:
return {}
status = navigate_hash(op_result, ['status'])
wait_done = wait_for_completion(status, op_result, module)
return fetch_resource(module, navigate_hash(wait_done, ['targetLink']), 'compute#nodeTemplate')
def wait_for_completion(status, op_result, module):
op_id = navigate_hash(op_result, ['name'])
op_uri = async_op_url(module, {'op_id': op_id})
while status != 'DONE':
raise_if_errors(op_result, ['error', 'errors'], module)
time.sleep(1.0)
op_result = fetch_resource(module, op_uri, 'compute#operation', False)
status = navigate_hash(op_result, ['status'])
return op_result
def raise_if_errors(response, err_path, module):
errors = navigate_hash(response, err_path)
if errors is not None:
module.fail_json(msg=errors)
class NodeTemplateNodetypeflexibility(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict({u'cpus': self.request.get('cpus'), u'memory': self.request.get('memory')})
def from_response(self):
return remove_nones_from_dict({u'cpus': self.request.get(u'cpus'), u'memory': self.request.get(u'memory')})
if __name__ == '__main__':
main()
| gpl-3.0 |
berezovskyi/nikola | tests/data/translated_titles/conf.py | 16 | 27281 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import time
# !! This is the configuration of Nikola. !!#
# !! You should edit it to your liking. !!#
# ! Some settings can be different in different languages.
# ! A comment stating (translatable) is used to denote those.
# ! There are two ways to specify a translatable setting:
# ! (a) BLOG_TITLE = "My Blog"
# ! (b) BLOG_TITLE = {"en": "My Blog", "es": "Mi Blog"}
# ! Option (a) is there for backwards compatibility and when you don't
# ! want that setting translated.
# ! Option (b) should be used for settings that are different in
# ! different languages.
# Data about this site
BLOG_AUTHOR = "Your Name" # (translatable)
BLOG_TITLE = "Demo Site" # (translatable)
# This is the main URL for your site. It will be used
# in a prominent link
SITE_URL = "https://example.com/"
# This is the URL where nikola's output will be deployed.
# If not set, defaults to SITE_URL
# BASE_URL = "https://example.com/"
BLOG_EMAIL = "[email protected]"
BLOG_DESCRIPTION = "This is a demo site for Nikola." # (translatable)
# Nikola is multilingual!
#
# Currently supported languages are:
# bg Bulgarian
# ca Catalan
# cs Czech [ALTERNATIVELY cz]
# de German
# el Greek [NOT gr!]
# en English
# eo Esperanto
# es Spanish
# et Estonian
# eu Basque
# fa Persian
# fi Finnish
# fr French
# hi Hindi
# hr Croatian
# it Italian
# ja Japanese [NOT jp!]
# nb Norwegian Bokmål
# nl Dutch
# pt_br Portuguese (Brasil)
# pl Polish
# ru Russian
# sl Slovenian [NOT sl_si!]
# tr Turkish (Turkey) [NOT tr_tr!]
# ur Urdu
# zh_cn Chinese (Simplified)
#
# If you want to use Nikola with a non-supported language you have to provide
# a module containing the necessary translations
# (cf. the modules at nikola/data/themes/base/messages/).
# If a specific post is not translated to a language, then the version
# in the default language will be shown instead.
# What is the default language?
DEFAULT_LANG = "en"
# What other languages do you have?
# The format is {"translationcode" : "path/to/translation" }
# the path will be used as a prefix for the generated pages location
TRANSLATIONS = {
"en": "",
"pl": "./pl",
}
# What will translated input files be named like?
# If you have a page something.rst, then something.pl.rst will be considered
# its Polish translation.
# (in the above example: path == "something", ext == "rst", lang == "pl")
# this pattern is also used for metadata:
# something.meta -> something.pl.meta
TRANSLATIONS_PATTERN = "{path}.{lang}.{ext}"
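# Hedged sanity check (illustration only, safe to delete): how the pattern
# above expands a translated filename.
assert TRANSLATIONS_PATTERN.format(path="something", lang="pl", ext="rst") \
    == "something.pl.rst"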
# Links for the sidebar / navigation bar.
# You should provide a key-value pair for each used language.
# (the same way you would do with a (translatable) setting.)
NAVIGATION_LINKS = {
DEFAULT_LANG: (
('/archive.html', 'Archives'),
('/categories/index.html', 'Tags'),
('/rss.xml', 'RSS'),
),
}
# Below this point, everything is optional
# While nikola can select a sensible locale for each language,
# sometimes explicit control can come handy.
# In this file we express locales in the string form that
# python's locales will accept in your OS, by example
# "en_US.utf8" in unix-like OS, "English_United States" in Windows.
# LOCALES = dict mapping language --> explicit locale for the languages
# in TRANSLATIONS. You can omit one or more keys.
# LOCALE_FALLBACK = locale to use when an explicit locale is unavailable
# LOCALE_DEFAULT = locale to use for languages not mentioned in LOCALES; if
# not set the default Nikola mapping is used.
# POSTS and PAGES contains (wildcard, destination, template) tuples.
#
# The wildcard is used to generate a list of reSt source files
# (whatever/thing.txt).
#
# That fragment could have an associated metadata file (whatever/thing.meta),
# and optionally translated files (example for spanish, with code "es"):
# whatever/thing.es.txt and whatever/thing.es.meta
#
# This assumes you use the default TRANSLATIONS_PATTERN.
#
# From those files, a set of HTML fragment files will be generated:
# cache/whatever/thing.html (and maybe cache/whatever/thing.html.es)
#
# These files are combined with the template to produce rendered
# pages, which will be placed at
# output / TRANSLATIONS[lang] / destination / pagename.html
#
# where "pagename" is the "slug" specified in the metadata file.
#
# The difference between POSTS and PAGES is that POSTS are added
# to feeds and are considered part of a blog, while PAGES are
# just independent HTML pages.
#
POSTS = (
("posts/*.rst", "posts", "post.tmpl"),
("posts/*.txt", "posts", "post.tmpl"),
)
PAGES = (
("stories/*.rst", "stories", "story.tmpl"),
("stories/*.txt", "stories", "story.tmpl"),
)
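# Hedged worked example (comment only): with the POSTS entry above, a source
# file posts/hello.rst whose metadata sets slug "hello" is compiled through
# post.tmpl and lands at output/<TRANSLATIONS[lang]>/posts/hello.html; its
# Polish translation posts/hello.pl.rst lands under output/pl/posts/.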
# One or more folders containing files to be copied as-is into the output.
# The format is a dictionary of "source" "relative destination".
# Default is:
# FILES_FOLDERS = {'files': '' }
# Which means copy 'files' into 'output'
# A mapping of languages to file-extensions that represent that language.
# Feel free to add or delete extensions to any list, but don't add any new
# compilers unless you write the interface for it yourself.
#
# 'rest' is reStructuredText
# 'markdown' is MarkDown
# 'html' assumes the file is html and just copies it
COMPILERS = {
"rest": ('.rst', '.txt'),
"markdown": ('.md', '.mdown', '.markdown'),
"textile": ('.textile',),
"txt2tags": ('.t2t',),
"bbcode": ('.bb',),
"wiki": ('.wiki',),
"ipynb": ('.ipynb',),
"html": ('.html', '.htm'),
# PHP files are rendered the usual way (i.e. with the full templates).
# The resulting files have .php extensions, making it possible to run
# them without reconfiguring your server to recognize them.
"php": ('.php',),
# Pandoc detects the input from the source filename
# but is disabled by default as it would conflict
# with many of the others.
# "pandoc": ('.rst', '.md', '.txt'),
}
# Create by default posts in one file format?
# Set to False for two-file posts, with separate metadata.
# ONE_FILE_POSTS = True
# If this is set to True, the DEFAULT_LANG version will be displayed for
# untranslated posts.
# If this is set to False, then posts that are not translated to a language
# LANG will not be visible at all in the pages in that language.
# Formerly known as HIDE_UNTRANSLATED_POSTS (inverse)
# SHOW_UNTRANSLATED_POSTS = True
# Paths for different autogenerated bits. These are combined with the
# translation paths.
# Final locations are:
# output / TRANSLATION[lang] / TAG_PATH / index.html (list of tags)
# output / TRANSLATION[lang] / TAG_PATH / tag.html (list of posts for a tag)
# output / TRANSLATION[lang] / TAG_PATH / tag.xml (RSS feed for a tag)
# TAG_PATH = "categories"
# If TAG_PAGES_ARE_INDEXES is set to True, each tag's page will contain
# the posts themselves. If set to False, it will be just a list of links.
# TAG_PAGES_ARE_INDEXES = True
# Final location for the main blog page and sibling paginated pages is
# output / TRANSLATION[lang] / INDEX_PATH / index-*.html
# INDEX_PATH = ""
# Create per-month archives instead of per-year
# CREATE_MONTHLY_ARCHIVE = False
# Create one large archive instead of per-year
# CREATE_SINGLE_ARCHIVE = False
# Final locations for the archives are:
# output / TRANSLATION[lang] / ARCHIVE_PATH / ARCHIVE_FILENAME
# output / TRANSLATION[lang] / ARCHIVE_PATH / YEAR / index.html
# output / TRANSLATION[lang] / ARCHIVE_PATH / YEAR / MONTH / index.html
# ARCHIVE_PATH = ""
# ARCHIVE_FILENAME = "archive.html"
# URLs to other posts/pages can take 3 forms:
# rel_path: a relative URL to the current page/post (default)
# full_path: a URL with the full path from the root
# absolute: a complete URL (that includes the SITE_URL)
# URL_TYPE = 'rel_path'
# Final location for the blog main RSS feed is:
# output / TRANSLATION[lang] / RSS_PATH / rss.xml
# RSS_PATH = ""
# Number of posts in RSS feeds
# FEED_LENGTH = 10
# Slugify the tag URLs. Easier for users to type; special characters are
# often removed or replaced as well.
# SLUG_TAG_PATH = True
# A list of redirection tuples, [("foo/from.html", "/bar/to.html")].
#
# An HTML file will be created in output/foo/from.html that redirects
# to the "/bar/to.html" URL. Note that the "from" side MUST be a
# relative URL.
#
# If you don't need any of these, just set to []
REDIRECTIONS = []
# Commands to execute to deploy. Can be anything, for example,
# you may use rsync:
# "rsync -rav --delete output/ [email protected]:/srv/www/site"
# And then do a backup, or run `nikola ping` from the `ping`
# plugin (`nikola install_plugin ping`).
# To do manual deployment, set it to []
# DEPLOY_COMMANDS = []
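# An illustrative value based on the rsync example above (user, host and
# paths are placeholders you would replace):
# DEPLOY_COMMANDS = [
#     "rsync -rav --delete output/ [email protected]:/srv/www/site",
# ]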
# Where the output site should be located
# If you don't use an absolute path, it will be considered as relative
# to the location of conf.py
# OUTPUT_FOLDER = 'output'
# Where the "cache" of partially generated content should be located.
# default: 'cache'
# CACHE_FOLDER = 'cache'
# Filters to apply to the output.
# A dictionary where the keys are either a file extension or
# a tuple of file extensions.
#
# And the value is a list of commands to be applied in order.
#
# Each command must be either:
#
# A string containing a '%s' which will
# be replaced with a filename. The command *must* produce output
# in place.
#
# Or:
#
# A python callable, which will be called with the filename as
# argument.
#
# By default, there are no filters.
#
# Many filters are shipped with Nikola. A list is available in the manual:
# <https://getnikola.com/handbook.html#post-processing-filters>
# FILTERS = {
# ".jpg": ["jpegoptim --strip-all -m75 -v %s"],
# }
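# A callable filter might look like this minimal sketch (the function name
# and its whitespace-squashing behavior are illustrative assumptions, not
# something shipped with Nikola):
#
# def squash_whitespace(filename):
#     """Rewrite `filename` in place with runs of whitespace collapsed."""
#     with open(filename) as f:
#         data = ' '.join(f.read().split())
#     with open(filename, 'w') as f:
#         f.write(data)
#
# FILTERS = {
#     ".html": [squash_whitespace],
# }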
# Expert setting! Create a gzipped copy of each generated file. Cheap server-
# side optimization for very high traffic sites or low memory servers.
# GZIP_FILES = False
# File extensions that will be compressed
# GZIP_EXTENSIONS = ('.txt', '.htm', '.html', '.css', '.js', '.json', '.xml')
# Use an external gzip command? None means no.
# Example: GZIP_COMMAND = "pigz -k {filename}"
# GZIP_COMMAND = None
# Make sure the server does not return an "Accept-Ranges: bytes" header for
# files compressed by this option! OR make sure that a ranged request does not
# return partial content of another representation for these resources. Do not
# use this feature if you do not understand what this means.
# Compiler to process LESS files.
# LESS_COMPILER = 'lessc'
# A list of options to pass to the LESS compiler.
# Final command is: LESS_COMPILER LESS_OPTIONS file.less
# LESS_OPTIONS = []
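# For instance, to minify the compiled CSS (assuming your lessc build
# supports this flag):
# LESS_OPTIONS = ['--compress']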
# Compiler to process Sass files.
# SASS_COMPILER = 'sass'
# A list of options to pass to the Sass compiler.
# Final command is: SASS_COMPILER SASS_OPTIONS file.s(a|c)ss
# SASS_OPTIONS = []
# #############################################################################
# Image Gallery Options
# #############################################################################
# Galleries are folders in galleries/
# Final location of galleries will be output / GALLERY_PATH / gallery_name
# GALLERY_PATH = "galleries"
# THUMBNAIL_SIZE = 180
# MAX_IMAGE_SIZE = 1280
# USE_FILENAME_AS_TITLE = True
# EXTRA_IMAGE_EXTENSIONS = []
#
# If set to False, it will sort by filename instead. Defaults to True
# GALLERY_SORT_BY_DATE = True
# #############################################################################
# HTML fragments and diverse things that are used by the templates
# #############################################################################
# Data about post-per-page indexes.
# INDEXES_PAGES defaults to 'old posts, page %d' or 'page %d' (translated),
# depending on the value of INDEXES_PAGES_MAIN.
# INDEXES_TITLE = "" # If this is empty, defaults to BLOG_TITLE
# INDEXES_PAGES = "" # If this is empty, defaults to '[old posts,] page %d' (see above)
# INDEXES_PAGES_MAIN = False # If True, INDEXES_PAGES is also displayed on
# # the main (the newest) index page (index.html)
# Name of the theme to use.
THEME = "bootstrap3"
# Color scheme to be used for code blocks. If your theme provides
# "assets/css/code.css" this is ignored.
# Can be any of autumn borland bw colorful default emacs friendly fruity manni
# monokai murphy native pastie perldoc rrt tango trac vim vs
# CODE_COLOR_SCHEME = 'default'
# If you use the 'site-reveal' theme you can select several subthemes
# THEME_REVEAL_CONFIG_SUBTHEME = 'sky'
# You can also use: beige/serif/simple/night/default
# Again, if you use the 'site-reveal' theme you can select several
# transitions between the slides
# THEME_REVEAL_CONFIG_TRANSITION = 'cube'
# You can also use: page/concave/linear/none/default
# Date format used to display post dates.
# (str used by datetime.datetime.strftime)
# DATE_FORMAT = '%Y-%m-%d %H:%M'
# FAVICONS contains (name, file, size) tuples.
# Used to create favicon links like this:
# <link rel="name" href="file" sizes="size"/>
# For creating favicons, take a look at:
# http://www.netmagazine.com/features/create-perfect-favicon
# FAVICONS = {
# ("icon", "/favicon.ico", "16x16"),
# ("icon", "/icon_128x128.png", "128x128"),
# }
# Show only teasers in the index pages? Defaults to False.
# INDEX_TEASERS = False
# An HTML fragment with the Read more... link.
# The following tags exist and are replaced for you:
# {link} A link to the full post page.
# {read_more} The string “Read more” in the current language.
# {{ A literal { (U+007B LEFT CURLY BRACKET)
# }} A literal } (U+007D RIGHT CURLY BRACKET)
# READ_MORE_LINK = '<p class="more"><a href="{link}">{read_more}…</a></p>'
# An HTML fragment describing the license, for the sidebar.
# (translatable)
LICENSE = ""
# I recommend using the Creative Commons wizard:
# http://creativecommons.org/choose/
# LICENSE = """
# <a rel="license" href="http://creativecommons.org/licenses/by-nc-sa/2.5/ar/">
# <img alt="Creative Commons License BY-NC-SA"
# style="border-width:0; margin-bottom:12px;"
# src="http://i.creativecommons.org/l/by-nc-sa/2.5/ar/88x31.png"></a>"""
# A small copyright notice for the page footer (in HTML).
# (translatable)
CONTENT_FOOTER = 'Contents © {date} <a href="mailto:{email}">{author}</a> - Powered by <a href="https://getnikola.com/" rel="nofollow">Nikola</a> {license}'
# Things that will be passed to CONTENT_FOOTER.format(). This is done
# for translatability, as dicts are not formattable. Nikola will
# intelligently format the setting properly.
# The setting takes a dict. The keys are languages. The values are
# tuples of tuples of positional arguments and dicts of keyword arguments
# to format(). For example, {'en': (('Hello'), {'target': 'World'})}
# results in CONTENT_FOOTER['en'].format('Hello', target='World').
# WARNING: If you do not use multiple languages with CONTENT_FOOTER, this
# still needs to be a dict of this format. (it can be empty if you
# do not need formatting)
# (translatable)
CONTENT_FOOTER_FORMATS = {
DEFAULT_LANG: (
(),
{
"email": BLOG_EMAIL,
"author": BLOG_AUTHOR,
"date": time.gmtime().tm_year,
"license": LICENSE
}
)
}
# To use comments, you can choose between different third party comment
# systems, one of "disqus", "livefyre", "intensedebate", "moot",
# "googleplus", "facebook" or "isso"
COMMENT_SYSTEM = "disqus"
# And you also need to add your COMMENT_SYSTEM_ID which
# depends on what comment system you use. The default is
# "nikolademo" which is a test account for Disqus. More information
# is in the manual.
COMMENT_SYSTEM_ID = "nikolademo"
# Enable annotations using annotateit.org?
# If set to False, you can still enable them for individual posts and pages
# setting the "annotations" metadata.
# If set to True, you can disable them for individual posts and pages using
# the "noannotations" metadata.
# ANNOTATIONS = False
# Create index.html for story folders?
# STORY_INDEX = False
# Enable comments on story pages?
# COMMENTS_IN_STORIES = False
# Enable comments on picture gallery pages?
# COMMENTS_IN_GALLERIES = False
# What file should be used for directory indexes?
# Defaults to index.html
# Common other alternatives: default.html for IIS, index.php
# INDEX_FILE = "index.html"
# If a link ends in /index.html, drop the index.html part.
# http://mysite/foo/bar/index.html => http://mysite/foo/bar/
# (Uses the INDEX_FILE setting, so if that is, say, default.html,
# it will instead turn /foo/default.html => /foo)
# (Note: This was briefly STRIP_INDEX_HTML in v 5.4.3 and 5.4.4)
# Default = False
# STRIP_INDEXES = False
# Should the sitemap list directories which only include other directories
# and no files?
# Defaults to True.
# If this is False:
# e.g. if /2012 includes only /01, /02, /03, /04, ...: don't add it to the sitemap;
# if /2012 includes any files (including index.html): add it to the sitemap.
# SITEMAP_INCLUDE_FILELESS_DIRS = True
# Instead of putting files in <slug>.html, put them in
# <slug>/index.html. Also enables STRIP_INDEXES
# This can be disabled on a per-page/post basis by adding
# .. pretty_url: False
# to the metadata
# PRETTY_URLS = False
# If True, publish future dated posts right away instead of scheduling them.
# Defaults to False.
# FUTURE_IS_NOW = False
# If True, future dated posts are allowed in deployed output
# Only the individual posts are published/deployed; not in indexes/sitemap
# Generally, you want FUTURE_IS_NOW and DEPLOY_FUTURE to be the same value.
# DEPLOY_FUTURE = False
# If False, draft posts will not be deployed
# DEPLOY_DRAFTS = True
# Allows scheduling of posts using the rule specified here (new_post -s)
# Specify an iCal Recurrence Rule: http://www.kanzaki.com/docs/ical/rrule.html
# SCHEDULE_RULE = ''
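# An illustrative daily rule (the exact recurrence is an assumption; see
# the RRULE documentation linked above):
# SCHEDULE_RULE = 'RRULE:FREQ=DAILY;BYHOUR=7;BYMINUTE=0;BYSECOND=0'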
# If True, apply the scheduling rule to all posts by default
# SCHEDULE_ALL = False
# If True, schedules posts for today if possible, even if the scheduled hour has passed
# SCHEDULE_FORCE_TODAY = False
# Do you want to add a MathJax config file?
# MATHJAX_CONFIG = ""
# If you are using the compile-ipynb plugin, just add this one:
# MATHJAX_CONFIG = """
# <script type="text/x-mathjax-config">
# MathJax.Hub.Config({
# tex2jax: {
# inlineMath: [ ['$','$'], ["\\\(","\\\)"] ],
# displayMath: [ ['$$','$$'], ["\\\[","\\\]"] ]
# },
# displayAlign: 'left', // Change this to 'center' to center equations.
# "HTML-CSS": {
# styles: {'.MathJax_Display': {"margin": 0}}
# }
# });
# </script>
# """
# Do you want to customize the nbconvert conversion of your IPython notebooks?
# IPYNB_CONFIG = {}
# With the following example configuration you can use a custom Jinja template
# called `toggle.tpl` which has to be located in your site/blog main folder:
# IPYNB_CONFIG = {'Exporter':{'template_file': 'toggle'}}
# What MarkDown extensions to enable?
# You will also get gist, nikola and podcast because those are
# done in the code, hope you don't mind ;-)
# MARKDOWN_EXTENSIONS = ['fenced_code', 'codehilite']
# Social buttons. This is sample code for AddThis (which was the default for a
# long time). Insert anything you want here, or even make it empty.
# (translatable)
# SOCIAL_BUTTONS_CODE = """
# <!-- Social buttons -->
# <div id="addthisbox" class="addthis_toolbox addthis_peekaboo_style addthis_default_style addthis_label_style addthis_32x32_style">
# <a class="addthis_button_more">Share</a>
# <ul><li><a class="addthis_button_facebook"></a>
# <li><a class="addthis_button_google_plusone_share"></a>
# <li><a class="addthis_button_linkedin"></a>
# <li><a class="addthis_button_twitter"></a>
# </ul>
# </div>
# <script src="//s7.addthis.com/js/300/addthis_widget.js#pubid=ra-4f7088a56bb93798"></script>
# <!-- End of social buttons -->
# """
# Show link to source for the posts?
# Formerly known as HIDE_SOURCELINK (inverse)
# SHOW_SOURCELINK = True
# Copy the source files for your pages?
# Setting it to False implies SHOW_SOURCELINK = False
# COPY_SOURCES = True
# Modify the number of posts per index page.
# Defaults to 10.
# INDEX_DISPLAY_POST_COUNT = 10
# RSS_LINK is an HTML fragment to link the RSS or Atom feeds. If set to None,
# the base.tmpl will use the feed Nikola generates. However, you may want to
# change it for a feedburner feed or something else.
# RSS_LINK = None
# Show only teasers in the RSS feed? Defaults to True
# RSS_TEASERS = True
# Strip HTML in the RSS feed? Defaults to False
# RSS_PLAIN = False
# A search form to search this site, for the sidebar. You can use a Google
# custom search (http://www.google.com/cse/)
# or a DuckDuckGo search: https://duckduckgo.com/search_box.html
# Default is no search form.
# (translatable)
# SEARCH_FORM = ""
#
# This search form works for any site and looks good in the "site" theme where
# it appears on the navigation bar:
#
# SEARCH_FORM = """
# <!-- Custom search -->
# <form method="get" id="search" action="//duckduckgo.com/"
# class="navbar-form pull-left">
# <input type="hidden" name="sites" value="%s"/>
# <input type="hidden" name="k8" value="#444444"/>
# <input type="hidden" name="k9" value="#D51920"/>
# <input type="hidden" name="kt" value="h"/>
# <input type="text" name="q" maxlength="255"
# placeholder="Search…" class="span2" style="margin-top: 4px;"/>
# <input type="submit" value="DuckDuckGo Search" style="visibility: hidden;" />
# </form>
# <!-- End of custom search -->
# """ % SITE_URL
#
# If you prefer a google search form, here's an example that should just work:
# SEARCH_FORM = """
# <!-- Custom search with google-->
# <form id="search" action="//www.google.com/search" method="get" class="navbar-form pull-left">
# <input type="hidden" name="q" value="site:%s" />
# <input type="text" name="q" maxlength="255" results="0" placeholder="Search"/>
# </form>
# <!-- End of custom search -->
# """ % SITE_URL
# Also, there is a local search plugin you can use, based on Tipue, but it requires setting several
# options:
# SEARCH_FORM = """
# <span class="navbar-form pull-left">
# <input type="text" id="tipue_search_input">
# </span>"""
#
# BODY_END = """
# <script src="/assets/js/tipuesearch_set.js"></script>
# <script src="/assets/js/tipuesearch.js"></script>
# <script>
# $(document).ready(function() {
# $('#tipue_search_input').tipuesearch({
# 'mode': 'json',
# 'contentLocation': '/assets/js/tipuesearch_content.json',
# 'showUrl': false
# });
# });
# </script>
# """
# EXTRA_HEAD_DATA = """
# <link rel="stylesheet" type="text/css" href="/assets/css/tipuesearch.css">
# <div id="tipue_search_content" style="margin-left: auto; margin-right: auto; padding: 20px;"></div>
# """
# ENABLED_EXTRAS = ['local_search']
#
# Use content distribution networks for jQuery and Twitter Bootstrap CSS and JS.
# If this is True, jQuery and html5shiv are served from the Google CDN and
# Bootstrap is served from BootstrapCDN (provided by MaxCDN)
# Set this to False if you want to host your site without requiring access to
# external resources.
# USE_CDN = False
# Extra things you want in the pages' <head> tag. This will be added right
# before </head>.
# (translatable)
# EXTRA_HEAD_DATA = ""
# Google Analytics or whatever else you use. Added to the bottom of <body>
# in the default template (base.tmpl).
# (translatable)
# BODY_END = ""
# Nikola can extract metadata from the filename using a
# regular expression.
# To make it work you need to name parts of your regular expression.
# The following names will be used to extract metadata:
# - title
# - slug
# - date
# - tags
# - link
# - description
#
# An example regexp is the following:
# '(?P<date>\d{4}-\d{2}-\d{2})-(?P<slug>.*)-(?P<title>.*)\.md'
# FILE_METADATA_REGEXP = None
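# As an illustration, the example regexp above could be enabled as a raw
# string, so that "2013-05-01-foo-bar-Hello.md" yields date "2013-05-01",
# slug "foo-bar" and title "Hello":
# FILE_METADATA_REGEXP = r'(?P<date>\d{4}-\d{2}-\d{2})-(?P<slug>.*)-(?P<title>.*)\.md'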
# Additional metadata that is added to a post when creating a new_post
# ADDITIONAL_METADATA = {}
# Nikola supports Twitter Card summaries / Open Graph.
# Twitter cards make it possible for you to attach media to Tweets
# that link to your content.
#
# IMPORTANT:
# Please note that you need to opt in to use Twitter Cards!
# To do this please visit
# https://dev.twitter.com/form/participate-twitter-cards
#
# Uncomment and modify the following lines to match your accounts.
# Specifying the id for either 'site' or 'creator' will be preferred
# over the cleartext username. Specifying an ID is not necessary.
# Displaying images is currently not supported.
# TWITTER_CARD = {
# # 'use_twitter_cards': True, # enable Twitter Cards / Open Graph
# # 'site': '@website', # twitter nick for the website
# # 'site:id': 123456, # Same as site, but the website's Twitter user ID
# # instead.
# # 'creator': '@username', # Username for the content creator / author.
# # 'creator:id': 654321, # Same as creator, but the Twitter user's ID.
# }
# Posts' dates are considered to be in UTC by default; if you want to use
# another time zone, please set TIMEZONE to match. Check the available
# list from Wikipedia:
# http://en.wikipedia.org/wiki/List_of_tz_database_time_zones
# (eg. 'Europe/Zurich')
# Also, if you want to use a different time zone in some of your posts,
# you can use the W3C-DTF format (e.g. 2012-03-30T23:00:00+02:00)
#
# TIMEZONE = 'UTC'
# If webassets is installed, bundle JS and CSS to make site loading faster
# USE_BUNDLES = True
# Plugins you don't want to use. Be careful :-)
# DISABLED_PLUGINS = ["render_galleries"]
# Add the absolute paths to directories containing plugins to use them.
# For example, the `plugins` directory of your clone of the Nikola plugins
# repository.
# EXTRA_PLUGINS_DIRS = []
# Experimental plugins - use at your own risk.
# They probably need some manual adjustments - please see their respective
# readme.
# ENABLED_EXTRAS = [
# 'planetoid',
# 'ipynb',
# 'local_search',
# 'render_mustache',
# ]
# List of regular expressions, links matching them will always be considered
# valid by "nikola check -l"
# LINK_CHECK_WHITELIST = []
# If set to True, enable optional hyphenation in your posts (requires pyphen)
# HYPHENATE = False
# The <hN> tags in HTML generated by certain compilers (reST/Markdown)
# will be demoted by that much (1 → h1 will become h2 and so on)
# This was a hidden feature of the Markdown and reST compilers in the
# past. Useful especially if your post titles are in <h1> tags too, for
# example.
# (defaults to 1.)
# DEMOTE_HEADERS = 1
# You can configure the logging handlers installed as plugins or change the
# log level of the default stdout handler.
LOGGING_HANDLERS = {
'stderr': {'loglevel': 'WARNING', 'bubble': True},
# 'smtp': {
# 'from_addr': '[email protected]',
# 'recipients': ('[email protected]'),
# 'credentials':('testusername', 'password'),
# 'server_addr': ('127.0.0.1', 25),
# 'secure': (),
# 'level': 'DEBUG',
# 'bubble': True
# }
}
# Templates will use those filters, along with the defaults.
# Consult your engine's documentation on filters if you need help defining
# those.
# TEMPLATE_FILTERS = {}
# Put in global_context things you want available on all your templates.
# It can be anything, data, functions, modules, etc.
GLOBAL_CONTEXT = {}
| mit |
qma/pants | tests/python/pants_test/backend/jvm/tasks/test_classpath_util.py | 1 | 2852 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from pants.backend.jvm.targets.jvm_target import JvmTarget
from pants.backend.jvm.tasks.classpath_util import ClasspathUtil
from pants.goal.products import UnionProducts
from pants_test.base_test import BaseTest
class ClasspathUtilTest(BaseTest):
def test_path_with_differing_conf_ignored(self):
a = self.make_target('a', JvmTarget)
classpath_product = UnionProducts()
path = os.path.join(self.build_root, 'jar/path')
classpath_product.add_for_target(a, [('default', path)])
classpath = ClasspathUtil.compute_classpath([a],
classpath_product,
extra_classpath_tuples=[],
confs=['not-default'])
self.assertEqual([], classpath)
def test_path_with_overlapped_conf_added(self):
a = self.make_target('a', JvmTarget)
classpath_product = UnionProducts()
path = os.path.join(self.build_root, 'jar/path')
classpath_product.add_for_target(a, [('default', path)])
classpath = ClasspathUtil.compute_classpath([a],
classpath_product,
extra_classpath_tuples=[],
confs=['not-default', 'default'])
self.assertEqual([path], classpath)
def test_extra_path_added(self):
a = self.make_target('a', JvmTarget)
classpath_product = UnionProducts()
path = os.path.join(self.build_root, 'jar/path')
classpath_product.add_for_target(a, [('default', path)])
extra_path = 'new-path'
extra_cp_tuples = [('default', extra_path)]
classpath = ClasspathUtil.compute_classpath([a],
classpath_product,
extra_classpath_tuples=extra_cp_tuples,
confs=['default'])
self.assertEqual([path, extra_path], classpath)
def test_relies_on_product_to_validate_paths_outside_buildroot(self):
a = self.make_target('a', JvmTarget)
classpath_product = UnionProducts()
classpath_product.add_for_target(a, [('default', '/dev/null')])
classpath = ClasspathUtil.compute_classpath([a],
classpath_product,
extra_classpath_tuples=[],
confs=['default'])
self.assertEqual(['/dev/null'], classpath)
| apache-2.0 |
jroyal/plexpy | lib/logutils/queue.py | 34 | 7547 | #
# Copyright (C) 2010-2013 Vinay Sajip. See LICENSE.txt for details.
#
"""
This module contains classes which help you work with queues. A typical
application is when you want to log from performance-critical threads, but
where the handlers you want to use are slow (for example,
:class:`~logging.handlers.SMTPHandler`). In that case, you can create a queue,
pass it to a :class:`QueueHandler` instance and use that instance with your
loggers. Elsewhere, you can instantiate a :class:`QueueListener` with the same
queue and some slow handlers, and call :meth:`~QueueListener.start` on it.
This will start monitoring the queue on a separate thread and call all the
configured handlers *on that thread*, so that your logging thread is not held
up by the slow handlers.
Note that as well as in-process queues, you can use these classes with queues
from the :mod:`multiprocessing` module.
**N.B.** This is part of the standard library since Python 3.2, so the
version here is for use with earlier Python versions.
"""
import logging
try:
import Queue as queue
except ImportError:
import queue
import threading
class QueueHandler(logging.Handler):
"""
This handler sends events to a queue. Typically, it would be used together
with a multiprocessing Queue to centralise logging to file in one process
(in a multi-process application), so as to avoid file write contention
between processes.
:param queue: The queue to send `LogRecords` to.
"""
def __init__(self, queue):
"""
Initialise an instance, using the passed queue.
"""
logging.Handler.__init__(self)
self.queue = queue
def enqueue(self, record):
"""
Enqueue a record.
The base implementation uses :meth:`~queue.Queue.put_nowait`. You may
want to override this method if you want to use blocking, timeouts or
custom queue implementations.
:param record: The record to enqueue.
"""
self.queue.put_nowait(record)
def prepare(self, record):
"""
Prepares a record for queuing. The object returned by this method is
enqueued.
The base implementation formats the record to merge the message
and arguments, and removes unpickleable items from the record
in-place.
You might want to override this method if you want to convert
the record to a dict or JSON string, or send a modified copy
of the record while leaving the original intact.
:param record: The record to prepare.
"""
# The format operation gets traceback text into record.exc_text
# (if there's exception data), and also puts the message into
# record.message. We can then use this to replace the original
# msg + args, as these might be unpickleable. We also zap the
# exc_info attribute, as it's no longer needed and, if not None,
# will typically not be pickleable.
self.format(record)
record.msg = record.message
record.args = None
record.exc_info = None
return record
def emit(self, record):
"""
Emit a record.
Writes the LogRecord to the queue, preparing it for pickling first.
:param record: The record to emit.
"""
try:
self.enqueue(self.prepare(record))
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
class QueueListener(object):
"""
This class implements an internal threaded listener which watches for
LogRecords being added to a queue, removes them and passes them to a
list of handlers for processing.
    :param queue: The queue to listen to.
:param handlers: The handlers to invoke on everything received from
the queue.
"""
_sentinel = None
def __init__(self, queue, *handlers):
"""
Initialise an instance with the specified queue and
handlers.
"""
self.queue = queue
self.handlers = handlers
self._stop = threading.Event()
self._thread = None
def dequeue(self, block):
"""
Dequeue a record and return it, optionally blocking.
The base implementation uses :meth:`~queue.Queue.get`. You may want to
override this method if you want to use timeouts or work with custom
queue implementations.
:param block: Whether to block if the queue is empty. If `False` and
the queue is empty, an :class:`~queue.Empty` exception
will be thrown.
"""
return self.queue.get(block)
def start(self):
"""
Start the listener.
This starts up a background thread to monitor the queue for
LogRecords to process.
"""
self._thread = t = threading.Thread(target=self._monitor)
t.setDaemon(True)
t.start()
    def prepare(self, record):
"""
Prepare a record for handling.
This method just returns the passed-in record. You may want to
override this method if you need to do any custom marshalling or
manipulation of the record before passing it to the handlers.
:param record: The record to prepare.
"""
return record
def handle(self, record):
"""
Handle a record.
This just loops through the handlers offering them the record
to handle.
:param record: The record to handle.
"""
record = self.prepare(record)
for handler in self.handlers:
handler.handle(record)
def _monitor(self):
"""
Monitor the queue for records, and ask the handler
to deal with them.
This method runs on a separate, internal thread.
The thread will terminate if it sees a sentinel object in the queue.
"""
q = self.queue
has_task_done = hasattr(q, 'task_done')
while not self._stop.isSet():
try:
record = self.dequeue(True)
if record is self._sentinel:
break
self.handle(record)
if has_task_done:
q.task_done()
except queue.Empty:
pass
# There might still be records in the queue.
while True:
try:
record = self.dequeue(False)
if record is self._sentinel:
break
self.handle(record)
if has_task_done:
q.task_done()
except queue.Empty:
break
def enqueue_sentinel(self):
"""
Writes a sentinel to the queue to tell the listener to quit. This
implementation uses ``put_nowait()``. You may want to override this
method if you want to use timeouts or work with custom queue
implementations.
"""
self.queue.put_nowait(self._sentinel)
def stop(self):
"""
Stop the listener.
This asks the thread to terminate, and then waits for it to do so.
Note that if you don't call this before your application exits, there
may be some records still left on the queue, which won't be processed.
"""
self._stop.set()
self.enqueue_sentinel()
self._thread.join()
self._thread = None
| gpl-3.0 |
singingwolfboy/readthedocs.org | readthedocs/restapi/serializers.py | 7 | 1774 | from rest_framework import serializers
from readthedocs.builds.models import Build, Version
from readthedocs.projects.models import Project
class ProjectSerializer(serializers.ModelSerializer):
class Meta:
model = Project
fields = (
'id',
'name', 'slug', 'description', 'language',
'repo', 'repo_type',
'default_version', 'default_branch',
'documentation_type',
'users',
)
class ProjectFullSerializer(ProjectSerializer):
'''Serializer for all fields on project model'''
class Meta:
model = Project
class VersionSerializer(serializers.ModelSerializer):
project = ProjectSerializer()
downloads = serializers.DictField(source='get_downloads', read_only=True)
class Meta:
model = Version
fields = (
'id',
'project', 'slug',
'identifier', 'verbose_name',
'active', 'built',
'downloads',
)
class BuildSerializer(serializers.ModelSerializer):
project = ProjectSerializer()
class Meta:
model = Build
fields = (
'id',
'project',
'commit',
'type',
'date',
'success',
)
class VersionFullSerializer(VersionSerializer):
'''Serializer for all fields on version model'''
project = ProjectFullSerializer()
class Meta:
model = Version
class SearchIndexSerializer(serializers.Serializer):
q = serializers.CharField(max_length=500)
project = serializers.CharField(max_length=500, required=False)
version = serializers.CharField(max_length=500, required=False)
page = serializers.CharField(max_length=500, required=False)
| mit |
tsmrachel/remo | remo/remozilla/tasks.py | 1 | 5981 | from __future__ import unicode_literals
from datetime import datetime, timedelta
from urllib import quote
from django.conf import settings
from django.contrib.auth.models import User
from django.db import transaction
from django.utils import timezone
import requests
import waffle
from celery.task import periodic_task
from remo.base.templatetags.helpers import urlparams
from remo.base.utils import get_object_or_none
from remo.remozilla.models import Bug
from remo.remozilla.utils import get_last_updated_date, set_last_updated_date
COMPONENTS = ['Budget Requests', 'Community IT Requests', 'Mentorship',
'Swag Requests', 'Planning']
BUGZILLA_FIELDS = ['is_confirmed', 'summary', 'creator', 'creation_time',
'component', 'whiteboard', 'op_sys', 'cc', 'id',
'status', 'assigned_to', 'resolution',
'last_change_time', 'flags']
URL = ('https://bugzilla.mozilla.org/rest/bug?api_key={api_key}'
'&product=Mozilla%20Reps&component={component}&'
'include_fields={fields}&last_change_time={timestamp}&'
'offset={offset}&limit={limit}')
COMMENT_URL = 'https://bugzilla.mozilla.org/rest/bug/{id}/comment?api_key={api_key}'
LIMIT = 100
BUG_WHITEBOARD = 'Review Team approval needed'
BUG_REVIEW = 'remo-review'
BUG_APPROVAL = 'remo-approval'
def parse_bugzilla_time(time):
if not time:
return None
datetimeobj = datetime.strptime(time, '%Y-%m-%dT%H:%M:%SZ')
datetimeobj = timezone.make_aware(datetimeobj, timezone.utc)
return datetimeobj
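# For example (illustrative): parse_bugzilla_time('2015-01-31T12:00:00Z')
# returns a timezone-aware datetime(2015, 1, 31, 12, 0, tzinfo=<UTC>).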
@periodic_task(run_every=timedelta(minutes=15))
@transaction.atomic
def fetch_bugs(components=COMPONENTS, days=None):
"""Fetch all bugs from Bugzilla.
    Loop over components and fetch bugs updated during the last days. Link
    Bugzilla users with users on this website, when possible.
"""
now = timezone.now()
if not days:
changed_date = get_last_updated_date()
else:
changed_date = now - timedelta(int(days))
for component in components:
offset = 0
url = URL.format(api_key=settings.REMOZILLA_API_KEY, component=quote(component),
fields=','.join(BUGZILLA_FIELDS),
timestamp=changed_date, offset=offset, limit=LIMIT)
while True:
bugs = requests.get(url).json()
error = bugs.get('error')
# Check the server response for errors
if error:
raise ValueError('Invalid response from server, {0}.'.format(bugs['message']))
remo_bugs = bugs.get('bugs', [])
if not remo_bugs:
break
for bdata in remo_bugs:
# Get comments for current bug
comment_url = COMMENT_URL.format(id=bdata['id'],
api_key=settings.REMOZILLA_API_KEY)
comments = requests.get(comment_url).json()
error = comments.get('error')
if error:
raise ValueError('Invalid response from server, {0}.'
.format(comments['message']))
bug, created = Bug.objects.get_or_create(bug_id=bdata['id'])
bug.summary = bdata.get('summary', '')
creator_email = bdata['creator']
bug.creator = get_object_or_none(User, email=creator_email)
bug.bug_creation_time = parse_bugzilla_time(bdata['creation_time'])
bug.component = bdata['component']
bug.whiteboard = bdata.get('whiteboard', '')
bug.cc.clear()
for email in bdata.get('cc', []):
cc_user = get_object_or_none(User, email=email)
if cc_user:
bug.cc.add(cc_user)
bug.assigned_to = get_object_or_none(
User, email=bdata['assigned_to'])
bug.status = bdata['status']
bug.resolution = bdata.get('resolution', '')
bug.bug_last_change_time = parse_bugzilla_time(bdata.get('last_change_time'))
automated_voting_trigger = 0
bug.budget_needinfo.clear()
bug.council_member_assigned = False
bug.pending_mentor_validation = False
for flag in bdata.get('flags', []):
if flag['status'] == '?' and flag['name'] == BUG_APPROVAL:
automated_voting_trigger += 1
if BUG_WHITEBOARD in bug.whiteboard:
bug.council_member_assigned = True
if ((flag['status'] == '?' and
flag['name'] == 'needinfo' and 'requestee' in flag and
flag['requestee'] == (settings.REPS_REVIEW_ALIAS))):
automated_voting_trigger += 1
if flag['status'] == '?' and flag['name'] == BUG_REVIEW:
bug.pending_mentor_validation = True
if (flag['status'] == '?' and flag['name'] == 'needinfo' and
'requestee' in flag):
email = flag['requestee']
user = get_object_or_none(User, email=email)
if user:
bug.budget_needinfo.add(user)
if automated_voting_trigger == 2 and waffle.switch_is_active('automated_polls'):
bug.council_vote_requested = True
unicode_id = str(bdata['id'])
bug_comments = comments['bugs'][unicode_id]['comments']
if bug_comments and bug_comments[0].get('text', ''):
# Enforce unicode encoding.
bug.first_comment = bug_comments[0]['text']
bug.save()
offset += LIMIT
url = urlparams(url, offset=offset)
set_last_updated_date(now)
| bsd-3-clause |
drnextgis/QGIS | python/ext-libs/pytz/tzfile.py | 480 | 4869 | #!/usr/bin/env python
'''
$Id: tzfile.py,v 1.8 2004/06/03 00:15:24 zenzen Exp $
'''
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
from datetime import datetime, timedelta
from struct import unpack, calcsize
from pytz.tzinfo import StaticTzInfo, DstTzInfo, memorized_ttinfo
from pytz.tzinfo import memorized_datetime, memorized_timedelta
def _byte_string(s):
"""Cast a string or byte string to an ASCII byte string."""
return s.encode('US-ASCII')
_NULL = _byte_string('\0')
def _std_string(s):
"""Cast a string or byte string to an ASCII string."""
return str(s.decode('US-ASCII'))
def build_tzinfo(zone, fp):
head_fmt = '>4s c 15x 6l'
head_size = calcsize(head_fmt)
    (magic, format, ttisgmtcnt, ttisstdcnt, leapcnt, timecnt,
     typecnt, charcnt) = unpack(head_fmt, fp.read(head_size))
# Make sure it is a tzfile(5) file
assert magic == _byte_string('TZif'), 'Got magic %s' % repr(magic)
# Read out the transition times, localtime indices and ttinfo structures.
data_fmt = '>%(timecnt)dl %(timecnt)dB %(ttinfo)s %(charcnt)ds' % dict(
timecnt=timecnt, ttinfo='lBB'*typecnt, charcnt=charcnt)
data_size = calcsize(data_fmt)
data = unpack(data_fmt, fp.read(data_size))
# make sure we unpacked the right number of values
assert len(data) == 2 * timecnt + 3 * typecnt + 1
transitions = [memorized_datetime(trans)
for trans in data[:timecnt]]
lindexes = list(data[timecnt:2 * timecnt])
ttinfo_raw = data[2 * timecnt:-1]
tznames_raw = data[-1]
del data
# Process ttinfo into separate structs
ttinfo = []
tznames = {}
i = 0
while i < len(ttinfo_raw):
# have we looked up this timezone name yet?
tzname_offset = ttinfo_raw[i+2]
if tzname_offset not in tznames:
nul = tznames_raw.find(_NULL, tzname_offset)
if nul < 0:
nul = len(tznames_raw)
tznames[tzname_offset] = _std_string(
tznames_raw[tzname_offset:nul])
ttinfo.append((ttinfo_raw[i],
bool(ttinfo_raw[i+1]),
tznames[tzname_offset]))
i += 3
# Now build the timezone object
    if len(transitions) == 0:
cls = type(zone, (StaticTzInfo,), dict(
zone=zone,
_utcoffset=memorized_timedelta(ttinfo[0][0]),
_tzname=ttinfo[0][2]))
else:
# Early dates use the first standard time ttinfo
i = 0
while ttinfo[i][1]:
i += 1
if ttinfo[i] == ttinfo[lindexes[0]]:
transitions[0] = datetime.min
else:
transitions.insert(0, datetime.min)
lindexes.insert(0, i)
# calculate transition info
transition_info = []
for i in range(len(transitions)):
inf = ttinfo[lindexes[i]]
utcoffset = inf[0]
if not inf[1]:
dst = 0
else:
for j in range(i-1, -1, -1):
prev_inf = ttinfo[lindexes[j]]
if not prev_inf[1]:
break
dst = inf[0] - prev_inf[0] # dst offset
                # Bad dst? Look further. DST > 24 hours happens when
                # a timezone has moved across the international dateline.
if dst <= 0 or dst > 3600*3:
for j in range(i+1, len(transitions)):
stdinf = ttinfo[lindexes[j]]
if not stdinf[1]:
dst = inf[0] - stdinf[0]
if dst > 0:
break # Found a useful std time.
tzname = inf[2]
# Round utcoffset and dst to the nearest minute or the
# datetime library will complain. Conversions to these timezones
# might be up to plus or minus 30 seconds out, but it is
# the best we can do.
utcoffset = int((utcoffset + 30) // 60) * 60
dst = int((dst + 30) // 60) * 60
transition_info.append(memorized_ttinfo(utcoffset, dst, tzname))
cls = type(zone, (DstTzInfo,), dict(
zone=zone,
_utc_transition_times=transitions,
_transition_info=transition_info))
return cls()
if __name__ == '__main__':
import os.path
from pprint import pprint
base = os.path.join(os.path.dirname(__file__), 'zoneinfo')
tz = build_tzinfo('Australia/Melbourne',
open(os.path.join(base,'Australia','Melbourne'), 'rb'))
tz = build_tzinfo('US/Eastern',
open(os.path.join(base,'US','Eastern'), 'rb'))
pprint(tz._utc_transition_times)
#print tz.asPython(4)
#print tz.transitions_mapping
| gpl-2.0 |
googleads/google-ads-python | google/ads/googleads/v6/services/services/offline_user_data_job_service/transports/base.py | 1 | 5512 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import typing
import pkg_resources
from google import auth
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.auth import credentials # type: ignore
from google.ads.googleads.v6.resources.types import offline_user_data_job
from google.ads.googleads.v6.services.types import offline_user_data_job_service
from google.longrunning import operations_pb2 as operations # type: ignore
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-ads",).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class OfflineUserDataJobServiceTransport(metaclass=abc.ABCMeta):
"""Abstract transport class for OfflineUserDataJobService."""
AUTH_SCOPES = ("https://www.googleapis.com/auth/adwords",)
def __init__(
self,
*,
host: str = "googleads.googleapis.com",
credentials: credentials.Credentials = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]): The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials is None:
credentials, _ = auth.default(scopes=self.AUTH_SCOPES)
# Save the credentials.
self._credentials = credentials
# Lifted into its own function so it can be stubbed out during tests.
self._prep_wrapped_messages(client_info)
def _prep_wrapped_messages(self, client_info):
# Precomputed wrapped methods
self._wrapped_methods = {
self.create_offline_user_data_job: gapic_v1.method.wrap_method(
self.create_offline_user_data_job,
default_timeout=None,
client_info=client_info,
),
self.get_offline_user_data_job: gapic_v1.method.wrap_method(
self.get_offline_user_data_job,
default_timeout=None,
client_info=client_info,
),
self.add_offline_user_data_job_operations: gapic_v1.method.wrap_method(
self.add_offline_user_data_job_operations,
default_timeout=None,
client_info=client_info,
),
self.run_offline_user_data_job: gapic_v1.method.wrap_method(
self.run_offline_user_data_job,
default_timeout=None,
client_info=client_info,
),
}
@property
def operations_client(self) -> operations_v1.OperationsClient:
"""Return the client designed to process long-running operations."""
raise NotImplementedError
@property
def create_offline_user_data_job(
self,
) -> typing.Callable[
[offline_user_data_job_service.CreateOfflineUserDataJobRequest],
offline_user_data_job_service.CreateOfflineUserDataJobResponse,
]:
raise NotImplementedError
@property
def get_offline_user_data_job(
self,
) -> typing.Callable[
[offline_user_data_job_service.GetOfflineUserDataJobRequest],
offline_user_data_job.OfflineUserDataJob,
]:
raise NotImplementedError
@property
def add_offline_user_data_job_operations(
self,
) -> typing.Callable[
[offline_user_data_job_service.AddOfflineUserDataJobOperationsRequest],
offline_user_data_job_service.AddOfflineUserDataJobOperationsResponse,
]:
raise NotImplementedError
@property
def run_offline_user_data_job(
self,
) -> typing.Callable[
[offline_user_data_job_service.RunOfflineUserDataJobRequest],
operations.Operation,
]:
raise NotImplementedError
__all__ = ("OfflineUserDataJobServiceTransport",)
| apache-2.0 |
alu0100207385/dsi_3Django | tests/defaultfilters/tests.py | 48 | 31837 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
import decimal
from django.template.defaultfilters import *
from django.test import TestCase
from django.test.utils import TransRealMixin
from django.utils import six
from django.utils import unittest, translation
from django.utils.safestring import SafeData
from django.utils.encoding import python_2_unicode_compatible
class DefaultFiltersTests(TestCase):
def test_floatformat(self):
self.assertEqual(floatformat(7.7), '7.7')
self.assertEqual(floatformat(7.0), '7')
self.assertEqual(floatformat(0.7), '0.7')
self.assertEqual(floatformat(0.07), '0.1')
self.assertEqual(floatformat(0.007), '0.0')
self.assertEqual(floatformat(0.0), '0')
self.assertEqual(floatformat(7.7, 3), '7.700')
self.assertEqual(floatformat(6.000000, 3), '6.000')
self.assertEqual(floatformat(6.200000, 3), '6.200')
self.assertEqual(floatformat(6.200000, -3), '6.200')
self.assertEqual(floatformat(13.1031, -3), '13.103')
self.assertEqual(floatformat(11.1197, -2), '11.12')
self.assertEqual(floatformat(11.0000, -2), '11')
self.assertEqual(floatformat(11.000001, -2), '11.00')
self.assertEqual(floatformat(8.2798, 3), '8.280')
self.assertEqual(floatformat(5555.555, 2), '5555.56')
self.assertEqual(floatformat(001.3000, 2), '1.30')
self.assertEqual(floatformat(0.12345, 2), '0.12')
self.assertEqual(floatformat(decimal.Decimal('555.555'), 2), '555.56')
self.assertEqual(floatformat(decimal.Decimal('09.000')), '9')
self.assertEqual(floatformat('foo'), '')
self.assertEqual(floatformat(13.1031, 'bar'), '13.1031')
self.assertEqual(floatformat(18.125, 2), '18.13')
self.assertEqual(floatformat('foo', 'bar'), '')
self.assertEqual(floatformat('¿Cómo esta usted?'), '')
self.assertEqual(floatformat(None), '')
# Check that we're not converting to scientific notation.
self.assertEqual(floatformat(0, 6), '0.000000')
self.assertEqual(floatformat(0, 7), '0.0000000')
self.assertEqual(floatformat(0, 10), '0.0000000000')
self.assertEqual(floatformat(0.000000000000000000015, 20),
'0.00000000000000000002')
pos_inf = float(1e30000)
self.assertEqual(floatformat(pos_inf), six.text_type(pos_inf))
neg_inf = float(-1e30000)
self.assertEqual(floatformat(neg_inf), six.text_type(neg_inf))
nan = pos_inf / pos_inf
self.assertEqual(floatformat(nan), six.text_type(nan))
class FloatWrapper(object):
def __init__(self, value):
self.value = value
def __float__(self):
return self.value
self.assertEqual(floatformat(FloatWrapper(11.000001), -2), '11.00')
# Regression for #15789
decimal_ctx = decimal.getcontext()
old_prec, decimal_ctx.prec = decimal_ctx.prec, 2
try:
self.assertEqual(floatformat(1.2345, 2), '1.23')
self.assertEqual(floatformat(15.2042, -3), '15.204')
self.assertEqual(floatformat(1.2345, '2'), '1.23')
self.assertEqual(floatformat(15.2042, '-3'), '15.204')
self.assertEqual(floatformat(decimal.Decimal('1.2345'), 2), '1.23')
self.assertEqual(floatformat(decimal.Decimal('15.2042'), -3), '15.204')
finally:
decimal_ctx.prec = old_prec
def test_floatformat_py2_fail(self):
self.assertEqual(floatformat(1.00000000000000015, 16), '1.0000000000000002')
# The test above fails because of Python 2's float handling. Floats with
# many zeroes after the decimal point should be passed in as another type
# such as unicode or Decimal.
if six.PY2:
test_floatformat_py2_fail = unittest.expectedFailure(test_floatformat_py2_fail)
def test_addslashes(self):
self.assertEqual(addslashes('"double quotes" and \'single quotes\''),
'\\"double quotes\\" and \\\'single quotes\\\'')
self.assertEqual(addslashes(r'\ : backslashes, too'),
'\\\\ : backslashes, too')
def test_capfirst(self):
self.assertEqual(capfirst('hello world'), 'Hello world')
def test_escapejs(self):
self.assertEqual(escapejs_filter('"double quotes" and \'single quotes\''),
'\\u0022double quotes\\u0022 and \\u0027single quotes\\u0027')
self.assertEqual(escapejs_filter(r'\ : backslashes, too'),
'\\u005C : backslashes, too')
self.assertEqual(escapejs_filter('and lots of whitespace: \r\n\t\v\f\b'),
'and lots of whitespace: \\u000D\\u000A\\u0009\\u000B\\u000C\\u0008')
self.assertEqual(escapejs_filter(r'<script>and this</script>'),
'\\u003Cscript\\u003Eand this\\u003C/script\\u003E')
self.assertEqual(
escapejs_filter('paragraph separator:\u2029and line separator:\u2028'),
'paragraph separator:\\u2029and line separator:\\u2028')
def test_fix_ampersands(self):
self.assertEqual(fix_ampersands_filter('Jack & Jill & Jeroboam'),
'Jack & Jill & Jeroboam')
def test_linenumbers(self):
self.assertEqual(linenumbers('line 1\nline 2'),
'1. line 1\n2. line 2')
self.assertEqual(linenumbers('\n'.join(['x'] * 10)),
'01. x\n02. x\n03. x\n04. x\n05. x\n06. x\n07. '\
'x\n08. x\n09. x\n10. x')
def test_lower(self):
self.assertEqual(lower('TEST'), 'test')
# uppercase E umlaut
self.assertEqual(lower('\xcb'), '\xeb')
def test_make_list(self):
self.assertEqual(make_list('abc'), ['a', 'b', 'c'])
self.assertEqual(make_list(1234), ['1', '2', '3', '4'])
def test_slugify(self):
self.assertEqual(slugify(' Jack & Jill like numbers 1,2,3 and 4 and'\
' silly characters ?%.$!/'),
'jack-jill-like-numbers-123-and-4-and-silly-characters')
self.assertEqual(slugify("Un \xe9l\xe9phant \xe0 l'or\xe9e du bois"),
'un-elephant-a-loree-du-bois')
def test_stringformat(self):
self.assertEqual(stringformat(1, '03d'), '001')
self.assertEqual(stringformat(1, 'z'), '')
def test_title(self):
self.assertEqual(title('a nice title, isn\'t it?'),
"A Nice Title, Isn't It?")
self.assertEqual(title('discoth\xe8que'), 'Discoth\xe8que')
def test_truncatewords(self):
self.assertEqual(
truncatewords('A sentence with a few words in it', 1), 'A ...')
self.assertEqual(
truncatewords('A sentence with a few words in it', 5),
'A sentence with a few ...')
self.assertEqual(
truncatewords('A sentence with a few words in it', 100),
'A sentence with a few words in it')
self.assertEqual(
truncatewords('A sentence with a few words in it',
'not a number'), 'A sentence with a few words in it')
def test_truncatewords_html(self):
self.assertEqual(truncatewords_html(
'<p>one <a href="#">two - three <br>four</a> five</p>', 0), '')
self.assertEqual(truncatewords_html('<p>one <a href="#">two - '\
'three <br>four</a> five</p>', 2),
'<p>one <a href="#">two ...</a></p>')
self.assertEqual(truncatewords_html(
'<p>one <a href="#">two - three <br>four</a> five</p>', 4),
'<p>one <a href="#">two - three <br>four ...</a></p>')
self.assertEqual(truncatewords_html(
'<p>one <a href="#">two - three <br>four</a> five</p>', 5),
'<p>one <a href="#">two - three <br>four</a> five</p>')
self.assertEqual(truncatewords_html(
'<p>one <a href="#">two - three <br>four</a> five</p>', 100),
'<p>one <a href="#">two - three <br>four</a> five</p>')
self.assertEqual(truncatewords_html(
'\xc5ngstr\xf6m was here', 1), '\xc5ngstr\xf6m ...')
def test_upper(self):
self.assertEqual(upper('Mixed case input'), 'MIXED CASE INPUT')
# lowercase e umlaut
self.assertEqual(upper('\xeb'), '\xcb')
def test_urlencode(self):
self.assertEqual(urlencode('fran\xe7ois & jill'),
'fran%C3%A7ois%20%26%20jill')
self.assertEqual(urlencode(1), '1')
def test_iriencode(self):
self.assertEqual(iriencode('S\xf8r-Tr\xf8ndelag'),
'S%C3%B8r-Tr%C3%B8ndelag')
self.assertEqual(iriencode(urlencode('fran\xe7ois & jill')),
'fran%C3%A7ois%20%26%20jill')
def test_urlizetrunc(self):
self.assertEqual(urlizetrunc('http://short.com/', 20), '<a href='\
'"http://short.com/" rel="nofollow">http://short.com/</a>')
self.assertEqual(urlizetrunc('http://www.google.co.uk/search?hl=en'\
'&q=some+long+url&btnG=Search&meta=', 20), '<a href="http://'\
'www.google.co.uk/search?hl=en&q=some+long+url&btnG=Search&'\
'meta=" rel="nofollow">http://www.google...</a>')
self.assertEqual(urlizetrunc('http://www.google.co.uk/search?hl=en'\
'&q=some+long+url&btnG=Search&meta=', 20), '<a href="http://'\
'www.google.co.uk/search?hl=en&q=some+long+url&btnG=Search'\
'&meta=" rel="nofollow">http://www.google...</a>')
# Check truncating of URIs which are the exact length
uri = 'http://31characteruri.com/test/'
self.assertEqual(len(uri), 31)
self.assertEqual(urlizetrunc(uri, 31),
'<a href="http://31characteruri.com/test/" rel="nofollow">'\
'http://31characteruri.com/test/</a>')
self.assertEqual(urlizetrunc(uri, 30),
'<a href="http://31characteruri.com/test/" rel="nofollow">'\
'http://31characteruri.com/t...</a>')
self.assertEqual(urlizetrunc(uri, 2),
'<a href="http://31characteruri.com/test/"'\
' rel="nofollow">...</a>')
def test_urlize(self):
# Check normal urlize
self.assertEqual(urlize('http://google.com'),
'<a href="http://google.com" rel="nofollow">http://google.com</a>')
self.assertEqual(urlize('http://google.com/'),
'<a href="http://google.com/" rel="nofollow">http://google.com/</a>')
self.assertEqual(urlize('www.google.com'),
'<a href="http://www.google.com" rel="nofollow">www.google.com</a>')
self.assertEqual(urlize('djangoproject.org'),
'<a href="http://djangoproject.org" rel="nofollow">djangoproject.org</a>')
self.assertEqual(urlize('[email protected]'),
'<a href="mailto:[email protected]">[email protected]</a>')
# Check urlize with https addresses
self.assertEqual(urlize('https://google.com'),
'<a href="https://google.com" rel="nofollow">https://google.com</a>')
# Check urlize doesn't overquote already quoted urls - see #9655
# The teststring is the urlquoted version of 'http://hi.baidu.com/重新开始'
self.assertEqual(urlize('http://hi.baidu.com/%E9%87%8D%E6%96%B0%E5%BC%80%E5%A7%8B'),
'<a href="http://hi.baidu.com/%E9%87%8D%E6%96%B0%E5%BC%80%E5%A7%8B" rel="nofollow">'
'http://hi.baidu.com/%E9%87%8D%E6%96%B0%E5%BC%80%E5%A7%8B</a>')
self.assertEqual(urlize('www.mystore.com/30%OffCoupons!'),
'<a href="http://www.mystore.com/30%25OffCoupons!" rel="nofollow">'
'www.mystore.com/30%OffCoupons!</a>')
self.assertEqual(urlize('http://en.wikipedia.org/wiki/Caf%C3%A9'),
'<a href="http://en.wikipedia.org/wiki/Caf%C3%A9" rel="nofollow">'
'http://en.wikipedia.org/wiki/Caf%C3%A9</a>')
self.assertEqual(urlize('http://en.wikipedia.org/wiki/Café'),
'<a href="http://en.wikipedia.org/wiki/Caf%C3%A9" rel="nofollow">'
'http://en.wikipedia.org/wiki/Café</a>')
# Check urlize keeps balanced parentheses - see #11911
self.assertEqual(urlize('http://en.wikipedia.org/wiki/Django_(web_framework)'),
'<a href="http://en.wikipedia.org/wiki/Django_(web_framework)" rel="nofollow">'
'http://en.wikipedia.org/wiki/Django_(web_framework)</a>')
self.assertEqual(urlize('(see http://en.wikipedia.org/wiki/Django_(web_framework))'),
'(see <a href="http://en.wikipedia.org/wiki/Django_(web_framework)" rel="nofollow">'
'http://en.wikipedia.org/wiki/Django_(web_framework)</a>)')
# Check urlize adds nofollow properly - see #12183
self.assertEqual(urlize('[email protected] or www.bar.com'),
'<a href="mailto:[email protected]">[email protected]</a> or '
'<a href="http://www.bar.com" rel="nofollow">www.bar.com</a>')
# Check urlize handles IDN correctly - see #13704
self.assertEqual(urlize('http://c✶.ws'),
'<a href="http://xn--c-lgq.ws" rel="nofollow">http://c✶.ws</a>')
self.assertEqual(urlize('www.c✶.ws'),
'<a href="http://www.xn--c-lgq.ws" rel="nofollow">www.c✶.ws</a>')
self.assertEqual(urlize('c✶.org'),
'<a href="http://xn--c-lgq.org" rel="nofollow">c✶.org</a>')
self.assertEqual(urlize('info@c✶.org'),
'<a href="mailto:[email protected]">info@c✶.org</a>')
# Check urlize doesn't highlight malformed URIs - see #16395
self.assertEqual(urlize('http:///www.google.com'),
'http:///www.google.com')
self.assertEqual(urlize('http://.google.com'),
'http://.google.com')
self.assertEqual(urlize('http://@foo.com'),
'http://@foo.com')
# Check urlize accepts more TLDs - see #16656
self.assertEqual(urlize('usa.gov'),
'<a href="http://usa.gov" rel="nofollow">usa.gov</a>')
# Check urlize don't crash on invalid email with dot-starting domain - see #17592
self.assertEqual(urlize('[email protected]'),
'[email protected]')
# Check urlize accepts uppercased URL schemes - see #18071
self.assertEqual(urlize('HTTPS://github.com/'),
'<a href="https://github.com/" rel="nofollow">HTTPS://github.com/</a>')
# Check urlize trims trailing period when followed by parenthesis - see #18644
self.assertEqual(urlize('(Go to http://www.example.com/foo.)'),
'(Go to <a href="http://www.example.com/foo" rel="nofollow">http://www.example.com/foo</a>.)')
# Check urlize handles brackets properly (#19070)
self.assertEqual(urlize('[see www.example.com]'),
'[see <a href="http://www.example.com" rel="nofollow">www.example.com</a>]' )
self.assertEqual(urlize('see test[at[example.com'),
'see <a href="http://test[at[example.com" rel="nofollow">test[at[example.com</a>' )
self.assertEqual(urlize('[http://168.192.0.1](http://168.192.0.1)'),
'[<a href="http://168.192.0.1](http://168.192.0.1)" rel="nofollow">http://168.192.0.1](http://168.192.0.1)</a>')
# Check urlize works with IPv4/IPv6 addresses
self.assertEqual(urlize('http://192.168.0.15/api/9'),
'<a href="http://192.168.0.15/api/9" rel="nofollow">http://192.168.0.15/api/9</a>')
self.assertEqual(urlize('http://[2001:db8:cafe::2]/api/9'),
'<a href="http://[2001:db8:cafe::2]/api/9" rel="nofollow">http://[2001:db8:cafe::2]/api/9</a>')
def test_wordcount(self):
self.assertEqual(wordcount(''), 0)
self.assertEqual(wordcount('oneword'), 1)
self.assertEqual(wordcount('lots of words'), 3)
self.assertEqual(wordwrap('this is a long paragraph of text that '\
'really needs to be wrapped I\'m afraid', 14),
"this is a long\nparagraph of\ntext that\nreally needs\nto be "\
"wrapped\nI'm afraid")
self.assertEqual(wordwrap('this is a short paragraph of text.\n '\
'But this line should be indented', 14),
'this is a\nshort\nparagraph of\ntext.\n But this\nline '\
'should be\nindented')
self.assertEqual(wordwrap('this is a short paragraph of text.\n '\
'But this line should be indented',15), 'this is a short\n'\
'paragraph of\ntext.\n But this line\nshould be\nindented')
def test_rjust(self):
self.assertEqual(ljust('test', 10), 'test ')
self.assertEqual(ljust('test', 3), 'test')
self.assertEqual(rjust('test', 10), ' test')
self.assertEqual(rjust('test', 3), 'test')
def test_center(self):
self.assertEqual(center('test', 6), ' test ')
def test_cut(self):
self.assertEqual(cut('a string to be mangled', 'a'),
' string to be mngled')
self.assertEqual(cut('a string to be mangled', 'ng'),
'a stri to be maled')
self.assertEqual(cut('a string to be mangled', 'strings'),
'a string to be mangled')
def test_force_escape(self):
escaped = force_escape('<some html & special characters > here')
self.assertEqual(
escaped, '<some html & special characters > here')
self.assertIsInstance(escaped, SafeData)
self.assertEqual(
force_escape('<some html & special characters > here ĐÅ€£'),
'<some html & special characters > here'\
' \u0110\xc5\u20ac\xa3')
def test_linebreaks(self):
self.assertEqual(linebreaks_filter('line 1'), '<p>line 1</p>')
self.assertEqual(linebreaks_filter('line 1\nline 2'),
'<p>line 1<br />line 2</p>')
self.assertEqual(linebreaks_filter('line 1\rline 2'),
'<p>line 1<br />line 2</p>')
self.assertEqual(linebreaks_filter('line 1\r\nline 2'),
'<p>line 1<br />line 2</p>')
def test_linebreaksbr(self):
self.assertEqual(linebreaksbr('line 1\nline 2'),
'line 1<br />line 2')
self.assertEqual(linebreaksbr('line 1\rline 2'),
'line 1<br />line 2')
self.assertEqual(linebreaksbr('line 1\r\nline 2'),
'line 1<br />line 2')
def test_removetags(self):
self.assertEqual(removetags('some <b>html</b> with <script>alert'\
'("You smell")</script> disallowed <img /> tags', 'script img'),
'some <b>html</b> with alert("You smell") disallowed tags')
self.assertEqual(striptags('some <b>html</b> with <script>alert'\
'("You smell")</script> disallowed <img /> tags'),
'some html with alert("You smell") disallowed tags')
def test_dictsort(self):
sorted_dicts = dictsort([{'age': 23, 'name': 'Barbara-Ann'},
{'age': 63, 'name': 'Ra Ra Rasputin'},
{'name': 'Jonny B Goode', 'age': 18}], 'age')
self.assertEqual([sorted(dict.items()) for dict in sorted_dicts],
[[('age', 18), ('name', 'Jonny B Goode')],
[('age', 23), ('name', 'Barbara-Ann')],
[('age', 63), ('name', 'Ra Ra Rasputin')]])
        # If it is passed something other than a list of dictionaries,
        # it should fail silently
self.assertEqual(dictsort([1, 2, 3], 'age'), '')
self.assertEqual(dictsort('Hello!', 'age'), '')
self.assertEqual(dictsort({'a': 1}, 'age'), '')
self.assertEqual(dictsort(1, 'age'), '')
def test_dictsortreversed(self):
sorted_dicts = dictsortreversed([{'age': 23, 'name': 'Barbara-Ann'},
{'age': 63, 'name': 'Ra Ra Rasputin'},
{'name': 'Jonny B Goode', 'age': 18}],
'age')
self.assertEqual([sorted(dict.items()) for dict in sorted_dicts],
[[('age', 63), ('name', 'Ra Ra Rasputin')],
[('age', 23), ('name', 'Barbara-Ann')],
[('age', 18), ('name', 'Jonny B Goode')]])
        # If it is passed something other than a list of dictionaries,
        # it should fail silently
self.assertEqual(dictsortreversed([1, 2, 3], 'age'), '')
self.assertEqual(dictsortreversed('Hello!', 'age'), '')
self.assertEqual(dictsortreversed({'a': 1}, 'age'), '')
self.assertEqual(dictsortreversed(1, 'age'), '')
def test_first(self):
self.assertEqual(first([0,1,2]), 0)
self.assertEqual(first(''), '')
self.assertEqual(first('test'), 't')
def test_join(self):
self.assertEqual(join([0,1,2], 'glue'), '0glue1glue2')
def test_length(self):
self.assertEqual(length('1234'), 4)
self.assertEqual(length([1,2,3,4]), 4)
self.assertEqual(length_is([], 0), True)
self.assertEqual(length_is([], 1), False)
self.assertEqual(length_is('a', 1), True)
self.assertEqual(length_is('a', 10), False)
def test_slice(self):
self.assertEqual(slice_filter('abcdefg', '0'), '')
self.assertEqual(slice_filter('abcdefg', '1'), 'a')
self.assertEqual(slice_filter('abcdefg', '-1'), 'abcdef')
self.assertEqual(slice_filter('abcdefg', '1:2'), 'b')
self.assertEqual(slice_filter('abcdefg', '1:3'), 'bc')
self.assertEqual(slice_filter('abcdefg', '0::2'), 'aceg')
def test_unordered_list(self):
self.assertEqual(unordered_list(['item 1', 'item 2']),
'\t<li>item 1</li>\n\t<li>item 2</li>')
self.assertEqual(unordered_list(['item 1', ['item 1.1']]),
'\t<li>item 1\n\t<ul>\n\t\t<li>item 1.1</li>\n\t</ul>\n\t</li>')
self.assertEqual(
unordered_list(['item 1', ['item 1.1', 'item1.2'], 'item 2']),
'\t<li>item 1\n\t<ul>\n\t\t<li>item 1.1</li>\n\t\t<li>item1.2'\
'</li>\n\t</ul>\n\t</li>\n\t<li>item 2</li>')
self.assertEqual(
unordered_list(['item 1', ['item 1.1', ['item 1.1.1',
['item 1.1.1.1']]]]),
'\t<li>item 1\n\t<ul>\n\t\t<li>item 1.1\n\t\t<ul>\n\t\t\t<li>'\
'item 1.1.1\n\t\t\t<ul>\n\t\t\t\t<li>item 1.1.1.1</li>\n\t\t\t'\
'</ul>\n\t\t\t</li>\n\t\t</ul>\n\t\t</li>\n\t</ul>\n\t</li>')
self.assertEqual(unordered_list(
['States', ['Kansas', ['Lawrence', 'Topeka'], 'Illinois']]),
'\t<li>States\n\t<ul>\n\t\t<li>Kansas\n\t\t<ul>\n\t\t\t<li>'\
'Lawrence</li>\n\t\t\t<li>Topeka</li>\n\t\t</ul>\n\t\t</li>'\
'\n\t\t<li>Illinois</li>\n\t</ul>\n\t</li>')
@python_2_unicode_compatible
class ULItem(object):
def __init__(self, title):
self.title = title
def __str__(self):
return 'ulitem-%s' % str(self.title)
a = ULItem('a')
b = ULItem('b')
self.assertEqual(unordered_list([a,b]),
'\t<li>ulitem-a</li>\n\t<li>ulitem-b</li>')
# Old format for unordered lists should still work
self.assertEqual(unordered_list(['item 1', []]), '\t<li>item 1</li>')
self.assertEqual(unordered_list(['item 1', [['item 1.1', []]]]),
'\t<li>item 1\n\t<ul>\n\t\t<li>item 1.1</li>\n\t</ul>\n\t</li>')
self.assertEqual(unordered_list(['item 1', [['item 1.1', []],
['item 1.2', []]]]), '\t<li>item 1\n\t<ul>\n\t\t<li>item 1.1'\
'</li>\n\t\t<li>item 1.2</li>\n\t</ul>\n\t</li>')
self.assertEqual(unordered_list(['States', [['Kansas', [['Lawrence',
[]], ['Topeka', []]]], ['Illinois', []]]]), '\t<li>States\n\t'\
'<ul>\n\t\t<li>Kansas\n\t\t<ul>\n\t\t\t<li>Lawrence</li>'\
'\n\t\t\t<li>Topeka</li>\n\t\t</ul>\n\t\t</li>\n\t\t<li>'\
'Illinois</li>\n\t</ul>\n\t</li>')
def test_add(self):
self.assertEqual(add('1', '2'), 3)
def test_get_digit(self):
self.assertEqual(get_digit(123, 1), 3)
self.assertEqual(get_digit(123, 2), 2)
self.assertEqual(get_digit(123, 3), 1)
self.assertEqual(get_digit(123, 4), 0)
self.assertEqual(get_digit(123, 0), 123)
self.assertEqual(get_digit('xyz', 0), 'xyz')
def test_date(self):
# real testing of date() is in dateformat.py
self.assertEqual(date(datetime.datetime(2005, 12, 29), "d F Y"),
'29 December 2005')
self.assertEqual(date(datetime.datetime(2005, 12, 29), r'jS \o\f F'),
'29th of December')
def test_time(self):
# real testing of time() is done in dateformat.py
self.assertEqual(time(datetime.time(13), "h"), '01')
self.assertEqual(time(datetime.time(0), "h"), '12')
def test_timesince(self):
# real testing is done in timesince.py, where we can provide our own 'now'
# NOTE: \xa0 avoids wrapping between value and unit
self.assertEqual(
timesince_filter(datetime.datetime.now() - datetime.timedelta(1)),
'1\xa0day')
self.assertEqual(
timesince_filter(datetime.datetime(2005, 12, 29),
datetime.datetime(2005, 12, 30)),
'1\xa0day')
def test_timeuntil(self):
# NOTE: \xa0 avoids wrapping between value and unit
self.assertEqual(
timeuntil_filter(datetime.datetime.now() + datetime.timedelta(1, 1)),
'1\xa0day')
self.assertEqual(
timeuntil_filter(datetime.datetime(2005, 12, 30),
datetime.datetime(2005, 12, 29)),
'1\xa0day')
def test_default(self):
self.assertEqual(default("val", "default"), 'val')
self.assertEqual(default(None, "default"), 'default')
self.assertEqual(default('', "default"), 'default')
def test_if_none(self):
self.assertEqual(default_if_none("val", "default"), 'val')
self.assertEqual(default_if_none(None, "default"), 'default')
self.assertEqual(default_if_none('', "default"), '')
def test_divisibleby(self):
self.assertEqual(divisibleby(4, 2), True)
self.assertEqual(divisibleby(4, 3), False)
def test_yesno(self):
self.assertEqual(yesno(True), 'yes')
self.assertEqual(yesno(False), 'no')
self.assertEqual(yesno(None), 'maybe')
self.assertEqual(yesno(True, 'certainly,get out of town,perhaps'),
'certainly')
self.assertEqual(yesno(False, 'certainly,get out of town,perhaps'),
'get out of town')
self.assertEqual(yesno(None, 'certainly,get out of town,perhaps'),
'perhaps')
self.assertEqual(yesno(None, 'certainly,get out of town'),
'get out of town')
def test_filesizeformat(self):
# NOTE: \xa0 avoids wrapping between value and unit
self.assertEqual(filesizeformat(1023), '1023\xa0bytes')
self.assertEqual(filesizeformat(1024), '1.0\xa0KB')
self.assertEqual(filesizeformat(10*1024), '10.0\xa0KB')
self.assertEqual(filesizeformat(1024*1024-1), '1024.0\xa0KB')
self.assertEqual(filesizeformat(1024*1024), '1.0\xa0MB')
self.assertEqual(filesizeformat(1024*1024*50), '50.0\xa0MB')
self.assertEqual(filesizeformat(1024*1024*1024-1), '1024.0\xa0MB')
self.assertEqual(filesizeformat(1024*1024*1024), '1.0\xa0GB')
self.assertEqual(filesizeformat(1024*1024*1024*1024), '1.0\xa0TB')
self.assertEqual(filesizeformat(1024*1024*1024*1024*1024), '1.0\xa0PB')
self.assertEqual(filesizeformat(1024*1024*1024*1024*1024*2000),
'2000.0\xa0PB')
self.assertEqual(filesizeformat(complex(1,-1)), '0\xa0bytes')
self.assertEqual(filesizeformat(""), '0\xa0bytes')
self.assertEqual(filesizeformat("\N{GREEK SMALL LETTER ALPHA}"),
'0\xa0bytes')
def test_pluralize(self):
self.assertEqual(pluralize(1), '')
self.assertEqual(pluralize(0), 's')
self.assertEqual(pluralize(2), 's')
self.assertEqual(pluralize([1]), '')
self.assertEqual(pluralize([]), 's')
self.assertEqual(pluralize([1,2,3]), 's')
self.assertEqual(pluralize(1,'es'), '')
self.assertEqual(pluralize(0,'es'), 'es')
self.assertEqual(pluralize(2,'es'), 'es')
self.assertEqual(pluralize(1,'y,ies'), 'y')
self.assertEqual(pluralize(0,'y,ies'), 'ies')
self.assertEqual(pluralize(2,'y,ies'), 'ies')
self.assertEqual(pluralize(0,'y,ies,error'), '')
def test_phone2numeric(self):
self.assertEqual(phone2numeric_filter('0800 flowers'), '0800 3569377')
def test_non_string_input(self):
# Filters shouldn't break if passed non-strings
self.assertEqual(addslashes(123), '123')
self.assertEqual(linenumbers(123), '1. 123')
self.assertEqual(lower(123), '123')
self.assertEqual(make_list(123), ['1', '2', '3'])
self.assertEqual(slugify(123), '123')
self.assertEqual(title(123), '123')
self.assertEqual(truncatewords(123, 2), '123')
self.assertEqual(upper(123), '123')
self.assertEqual(urlencode(123), '123')
self.assertEqual(urlize(123), '123')
self.assertEqual(urlizetrunc(123, 1), '123')
self.assertEqual(wordcount(123), 1)
self.assertEqual(wordwrap(123, 2), '123')
self.assertEqual(ljust('123', 4), '123 ')
self.assertEqual(rjust('123', 4), ' 123')
self.assertEqual(center('123', 5), ' 123 ')
self.assertEqual(center('123', 6), ' 123 ')
self.assertEqual(cut(123, '2'), '13')
self.assertEqual(escape(123), '123')
self.assertEqual(linebreaks_filter(123), '<p>123</p>')
self.assertEqual(linebreaksbr(123), '123')
self.assertEqual(removetags(123, 'a'), '123')
self.assertEqual(striptags(123), '123')
class DefaultFiltersI18NTests(TransRealMixin, TestCase):
def test_localized_filesizeformat(self):
# NOTE: \xa0 avoids wrapping between value and unit
with self.settings(USE_L10N=True):
with translation.override('de', deactivate=True):
self.assertEqual(filesizeformat(1023), '1023\xa0Bytes')
self.assertEqual(filesizeformat(1024), '1,0\xa0KB')
self.assertEqual(filesizeformat(10*1024), '10,0\xa0KB')
self.assertEqual(filesizeformat(1024*1024-1), '1024,0\xa0KB')
self.assertEqual(filesizeformat(1024*1024), '1,0\xa0MB')
self.assertEqual(filesizeformat(1024*1024*50), '50,0\xa0MB')
self.assertEqual(filesizeformat(1024*1024*1024-1), '1024,0\xa0MB')
self.assertEqual(filesizeformat(1024*1024*1024), '1,0\xa0GB')
self.assertEqual(filesizeformat(1024*1024*1024*1024), '1,0\xa0TB')
self.assertEqual(filesizeformat(1024*1024*1024*1024*1024),
'1,0\xa0PB')
self.assertEqual(filesizeformat(1024*1024*1024*1024*1024*2000),
'2000,0\xa0PB')
self.assertEqual(filesizeformat(complex(1,-1)), '0\xa0Bytes')
self.assertEqual(filesizeformat(""), '0\xa0Bytes')
self.assertEqual(filesizeformat("\N{GREEK SMALL LETTER ALPHA}"),
'0\xa0Bytes')
| bsd-3-clause |
lopezdp/ConstructionEstimatingProject | feetInchesCalc.py | 1 | 1539 | import math
def measurements():
    """Accumulate feet/inch distances and report the running total in feet
    and meters. Answer 1 to add another measurement, 2 to finish."""
    subTotal = 0
    answer = input("Do you want to keep a running total of your measurements? 1 for yes, 2 for no: ")
    while True:
        if (answer == 1):
            print " "
            feet = input("Enter feet portion of distance: ")
            inch = input("Enter inches portion of distance: ")
            # 12 inches per foot, so the inch part contributes inch/12 feet
            subTotal += (float(feet) + (float(inch)/12))
        elif (answer == 2):
            print " "
            print "Your measurement in FEET is: ", subTotal
            # one foot is exactly 0.3048 meters
            meters = subTotal * 0.3048
            print "Your measurement in METERS is: ", meters
            return subTotal
        else:
            print " "
            print "ERROR!!! Please answer 1 for yes or 2 for no."
        answer = input("Do you want to keep adding to your running total? 1 for yes, 2 for no: ")
measurements()
| mit |
sharpdeep/pyspider | pyspider/libs/utils.py | 53 | 11526 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8:
# Author: Binux<[email protected]>
# http://binux.me
# Created on 2012-11-06 11:50:13
import logging
import hashlib
import datetime
import base64
import six
from six import iteritems
md5string = lambda x: hashlib.md5(utf8(x)).hexdigest()
class ReadOnlyDict(dict):
"""A Read Only Dict"""
def __setitem__(self, key, value):
raise Exception("dict is read-only")
def getitem(obj, key=0, default=None):
"""Get first element of list or return default"""
try:
return obj[key]
except:
return default
def hide_me(tb, g=globals()):
"""Hide stack traceback of given stack"""
base_tb = tb
try:
while tb and tb.tb_frame.f_globals is not g:
tb = tb.tb_next
while tb and tb.tb_frame.f_globals is g:
tb = tb.tb_next
except Exception as e:
logging.exception(e)
tb = base_tb
if not tb:
tb = base_tb
return tb
def run_in_thread(func, *args, **kwargs):
"""Run function in thread, return a Thread object"""
from threading import Thread
thread = Thread(target=func, args=args, kwargs=kwargs)
thread.daemon = True
thread.start()
return thread
def run_in_subprocess(func, *args, **kwargs):
"""Run function in subprocess, return a Process object"""
from multiprocessing import Process
thread = Process(target=func, args=args, kwargs=kwargs)
thread.daemon = True
thread.start()
return thread
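# Usage sketch (added; not part of the original module). Both helpers start
# a daemon worker immediately and return it, so join() is how a caller waits
# for completion. The `work` function here is a hypothetical stand-in.
def _run_in_thread_example():  # pragma: no cover
    import time
    def work(delay):
        time.sleep(delay)
    t = run_in_thread(work, 0.01)
    t.join()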
def format_date(date, gmt_offset=0, relative=True, shorter=False, full_format=False):
"""Formats the given date (which should be GMT).
By default, we return a relative time (e.g., "2 minutes ago"). You
can return an absolute date string with ``relative=False``.
You can force a full format date ("July 10, 1980") with
``full_format=True``.
This method is primarily intended for dates in the past.
For dates in the future, we fall back to full format.
From tornado
"""
if not date:
return '-'
if isinstance(date, float) or isinstance(date, int):
date = datetime.datetime.utcfromtimestamp(date)
now = datetime.datetime.utcnow()
if date > now:
if relative and (date - now).seconds < 60:
            # Due to clock skew, timestamps can be slightly
            # in the future. Round timestamps in the immediate
            # future down to now in relative mode.
date = now
else:
# Otherwise, future dates always use the full format.
full_format = True
local_date = date - datetime.timedelta(minutes=gmt_offset)
local_now = now - datetime.timedelta(minutes=gmt_offset)
local_yesterday = local_now - datetime.timedelta(hours=24)
difference = now - date
seconds = difference.seconds
days = difference.days
format = None
if not full_format:
if relative and days == 0:
if seconds < 50:
return ("1 second ago" if seconds <= 1 else
"%(seconds)d seconds ago") % {"seconds": seconds}
if seconds < 50 * 60:
minutes = round(seconds / 60.0)
return ("1 minute ago" if minutes <= 1 else
"%(minutes)d minutes ago") % {"minutes": minutes}
hours = round(seconds / (60.0 * 60))
return ("1 hour ago" if hours <= 1 else
"%(hours)d hours ago") % {"hours": hours}
if days == 0:
format = "%(time)s"
elif days == 1 and local_date.day == local_yesterday.day and \
relative:
format = "yesterday" if shorter else "yesterday at %(time)s"
elif days < 5:
format = "%(weekday)s" if shorter else "%(weekday)s at %(time)s"
elif days < 334: # 11mo, since confusing for same month last year
format = "%(month_name)s-%(day)s" if shorter else \
"%(month_name)s-%(day)s at %(time)s"
if format is None:
format = "%(month_name)s %(day)s, %(year)s" if shorter else \
"%(month_name)s %(day)s, %(year)s at %(time)s"
str_time = "%d:%02d" % (local_date.hour, local_date.minute)
return format % {
"month_name": local_date.month - 1,
"weekday": local_date.weekday(),
"day": str(local_date.day),
"year": str(local_date.year),
"time": str_time
}
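# Illustrative sketch (added; not part of the original module): in the
# default relative mode a recent timestamp renders as a human phrase.
def _format_date_example():  # pragma: no cover
    import time
    # roughly two minutes in the past -> "2 minutes ago"
    print(format_date(time.time() - 120))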
class TimeoutError(Exception):
pass
try:
import signal
if not hasattr(signal, 'SIGALRM'):
raise ImportError('signal')
class timeout:
"""
Time limit of command
with timeout(3):
time.sleep(10)
"""
def __init__(self, seconds=1, error_message='Timeout'):
self.seconds = seconds
self.error_message = error_message
def handle_timeout(self, signum, frame):
raise TimeoutError(self.error_message)
def __enter__(self):
if self.seconds:
signal.signal(signal.SIGALRM, self.handle_timeout)
signal.alarm(self.seconds)
def __exit__(self, type, value, traceback):
if self.seconds:
signal.alarm(0)
except ImportError:
class timeout:
"""
Time limit of command (for windows)
"""
def __init__(self, seconds=1, error_message='Timeout'):
pass
def __enter__(self):
pass
def __exit__(self, type, value, traceback):
pass
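# Usage sketch (added; not part of the original module): on POSIX the context
# manager interrupts the body via SIGALRM; on Windows the fallback class above
# is a no-op, so no TimeoutError is ever raised there.
def _timeout_example():  # pragma: no cover
    import time
    try:
        with timeout(seconds=1):
            time.sleep(10)
    except TimeoutError:
        pass  # the sleep was cut short after about one second (POSIX only)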
def utf8(string):
"""
    Make sure string is utf8-encoded bytes.
    If the parameter is an object, object.__str__ will be called before
    encoding it as bytes.
"""
if isinstance(string, six.text_type):
return string.encode('utf8')
elif isinstance(string, six.binary_type):
return string
else:
return six.text_type(string).encode('utf8')
def text(string, encoding='utf8'):
"""
    Make sure string is of unicode type; decode it with the given encoding
    if it's not.
    If the parameter is an object, object.__str__ will be called first.
"""
if isinstance(string, six.text_type):
return string
elif isinstance(string, six.binary_type):
return string.decode(encoding)
else:
return six.text_type(string)
def pretty_unicode(string):
"""
    Make sure string is unicode; try to decode it as utf8, falling back to a
    unicode-escaped representation on failure.
"""
if isinstance(string, six.text_type):
return string
try:
return string.decode("utf8")
except UnicodeDecodeError:
return string.decode('Latin-1').encode('unicode_escape')
def unicode_string(string):
"""
    Make sure string is unicode; try to decode it as utf8, falling back to
    base64 on failure.
    Can be decoded back by `decode_unicode_string`.
"""
if isinstance(string, six.text_type):
return string
try:
return string.decode("utf8")
except UnicodeDecodeError:
return '[BASE64-DATA]' + base64.b64encode(string) + '[/BASE64-DATA]'
def unicode_dict(_dict):
"""
    Make sure keys and values of the dict are unicode.
"""
r = {}
for k, v in iteritems(_dict):
r[unicode_string(k)] = unicode_obj(v)
return r
def unicode_list(_list):
"""
    Make sure every element in the list is unicode. bytes will be encoded in base64.
"""
return [unicode_obj(x) for x in _list]
def unicode_obj(obj):
"""
    Make sure keys and values of the dict/list/tuple are unicode. bytes will
    be encoded in base64.
    Can be decoded back by `decode_unicode_obj`.
"""
if isinstance(obj, dict):
return unicode_dict(obj)
elif isinstance(obj, (list, tuple)):
return unicode_list(obj)
elif isinstance(obj, six.string_types):
return unicode_string(obj)
elif isinstance(obj, (int, float)):
return obj
elif obj is None:
return obj
else:
try:
return text(obj)
except:
return text(repr(obj))
def decode_unicode_string(string):
"""
Decode string encoded by `unicode_string`
"""
if string.startswith('[BASE64-DATA]') and string.endswith('[/BASE64-DATA]'):
return base64.b64decode(string[len('[BASE64-DATA]'):-len('[/BASE64-DATA]')])
return string
def decode_unicode_obj(obj):
"""
Decode unicoded dict/list/tuple encoded by `unicode_obj`
"""
if isinstance(obj, dict):
r = {}
for k, v in iteritems(obj):
r[decode_unicode_string(k)] = decode_unicode_obj(v)
return r
elif isinstance(obj, six.string_types):
return decode_unicode_string(obj)
elif isinstance(obj, (list, tuple)):
return [decode_unicode_obj(x) for x in obj]
else:
return obj
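# Round-trip sketch (added; Python 2 semantics): unicode_obj and
# decode_unicode_obj are inverses, non-utf8 bytes surviving via base64.
def _unicode_roundtrip_example():  # pragma: no cover
    raw = {'key': '\xff\xfe'}  # bytes that are not valid utf8
    safe = unicode_obj(raw)    # value becomes '[BASE64-DATA]...[/BASE64-DATA]'
    assert decode_unicode_obj(safe) == raw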
class Get(object):
"""
    Lazy value calculation for an object
"""
def __init__(self, getter):
self.getter = getter
def __get__(self, instance, owner):
return self.getter()
class ObjectDict(dict):
"""
    Dict that also behaves like an object: every dict[key] can be
    accessed as dict.key.
    If dict[key] is a `Get`, its value is calculated on access.
"""
def __getattr__(self, name):
ret = self.__getitem__(name)
if hasattr(ret, '__get__'):
return ret.__get__(self, ObjectDict)
return ret
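# Sketch (added; not part of the original module): items are reachable as
# attributes, and a Get wrapper is evaluated on every attribute access.
def _object_dict_example():  # pragma: no cover
    d = ObjectDict(host='example.com', answer=Get(lambda: 42))
    assert d.host == 'example.com'
    assert d.answer == 42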
def load_object(name):
"""Load object from module"""
if "." not in name:
        raise Exception('load_object requires a "module.object" path')
module_name, object_name = name.rsplit('.', 1)
if six.PY2:
module = __import__(module_name, globals(), locals(), [utf8(object_name)], -1)
else:
module = __import__(module_name, globals(), locals(), [object_name])
return getattr(module, object_name)
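# Sketch (added; not part of the original module): any dotted
# "module.attribute" path can be loaded.
def _load_object_example():  # pragma: no cover
    join = load_object('os.path.join')
    assert join('a', 'b') == 'a/b'  # on POSIX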
def get_python_console(namespace=None):
"""
    Return an interactive python console instance with the caller's stack
"""
if namespace is None:
import inspect
frame = inspect.currentframe()
caller = frame.f_back
if not caller:
logging.error("can't find caller who start this console.")
caller = frame
namespace = dict(caller.f_globals)
namespace.update(caller.f_locals)
try:
from IPython.terminal.interactiveshell import TerminalInteractiveShell
shell = TerminalInteractiveShell(user_ns=namespace)
except ImportError:
try:
import readline
import rlcompleter
readline.set_completer(rlcompleter.Completer(namespace).complete)
readline.parse_and_bind("tab: complete")
except ImportError:
pass
import code
shell = code.InteractiveConsole(namespace)
shell._quit = False
def exit():
shell._quit = True
def readfunc(prompt=""):
if shell._quit:
raise EOFError
return six.moves.input(prompt)
# inject exit method
shell.ask_exit = exit
shell.raw_input = readfunc
return shell
def python_console(namespace=None):
"""Start a interactive python console with caller's stack"""
if namespace is None:
import inspect
frame = inspect.currentframe()
caller = frame.f_back
if not caller:
logging.error("can't find caller who start this console.")
caller = frame
namespace = dict(caller.f_globals)
namespace.update(caller.f_locals)
return get_python_console(namespace=namespace).interact()
| apache-2.0 |
yuyu2172/chainercv | tests/chainer_experimental_tests/datasets_tests/sliceable_tests/test_getter_dataset.py | 3 | 3860 | import numpy as np
import unittest
from chainercv.chainer_experimental.datasets.sliceable import GetterDataset
from chainercv.utils import testing
class SampleDataset(GetterDataset):
def __init__(self, iterable=tuple):
super(SampleDataset, self).__init__()
self.add_getter('item0', self.get_item0)
self.add_getter(iterable(('item1', 'item2')), self.get_item1_item2)
self.add_getter(('item3',), self.get_item3)
self.count = 0
def __len__(self):
return 10
def get_item0(self, i):
self.count += 1
return 'item0({:d})'.format(i)
def get_item1_item2(self, i):
self.count += 1
return 'item1({:d})'.format(i), 'item2({:d})'.format(i)
def get_item3(self, i):
self.count += 1
return ('item3({:d})'.format(i),)
@testing.parameterize(
{'iterable': tuple},
{'iterable': list},
{'iterable': np.array},
)
class TestGetterDataset(unittest.TestCase):
def setUp(self):
self.dataset = SampleDataset(self.iterable)
def test_keys(self):
self.assertEqual(
self.dataset.keys, ('item0', 'item1', 'item2', 'item3'))
def test_get_example_by_keys(self):
example = self.dataset.get_example_by_keys(1, (1, 2, 3))
self.assertEqual(example, ('item1(1)', 'item2(1)', 'item3(1)'))
self.assertEqual(self.dataset.count, 2)
def test_set_keys_single_name(self):
self.dataset.keys = 'item0'
self.assertEqual(self.dataset.keys, 'item0')
self.assertEqual(self.dataset[1], 'item0(1)')
def test_set_keys_single_index(self):
self.dataset.keys = 0
self.assertEqual(self.dataset.keys, 'item0')
self.assertEqual(self.dataset[1], 'item0(1)')
def test_set_keys_single_tuple_name(self):
if self.iterable is np.array:
self.skipTest('ndarray of strings is not supported')
self.dataset.keys = self.iterable(('item1',))
self.assertEqual(self.dataset.keys, ('item1',))
self.assertEqual(self.dataset[2], ('item1(2)',))
def test_set_keys_single_tuple_index(self):
self.dataset.keys = self.iterable((1,))
self.assertEqual(self.dataset.keys, ('item1',))
self.assertEqual(self.dataset[2], ('item1(2)',))
def test_set_keys_multiple_name(self):
if self.iterable is np.array:
self.skipTest('ndarray of strings is not supported')
self.dataset.keys = self.iterable(('item0', 'item2'))
self.assertEqual(self.dataset.keys, ('item0', 'item2'))
self.assertEqual(self.dataset[3], ('item0(3)', 'item2(3)'))
def test_set_keys_multiple_index(self):
self.dataset.keys = self.iterable((0, 2))
self.assertEqual(self.dataset.keys, ('item0', 'item2'))
self.assertEqual(self.dataset[3], ('item0(3)', 'item2(3)'))
def test_set_keys_multiple_bool(self):
self.dataset.keys = self.iterable((True, False, True, False))
self.assertEqual(self.dataset.keys, ('item0', 'item2'))
self.assertEqual(self.dataset[3], ('item0(3)', 'item2(3)'))
def test_set_keys_multiple_mixed(self):
if self.iterable is np.array:
self.skipTest('ndarray of strings is not supported')
self.dataset.keys = self.iterable(('item0', 2))
self.assertEqual(self.dataset.keys, ('item0', 'item2'))
self.assertEqual(self.dataset[3], ('item0(3)', 'item2(3)'))
def test_set_keys_invalid_name(self):
with self.assertRaises(KeyError):
self.dataset.keys = 'invalid'
def test_set_keys_invalid_index(self):
with self.assertRaises(IndexError):
self.dataset.keys = 4
def test_set_keys_invalid_bool(self):
with self.assertRaises(ValueError):
self.dataset.keys = (True, True)
testing.run_module(__name__, __file__)
| mit |
dou800/php-buildpack-legacy | builds/runtimes/python-2.7.6/lib/python2.7/lib2to3/fixes/fix_metaclass.py | 153 | 8214 | """Fixer for __metaclass__ = X -> (metaclass=X) methods.
   The various forms of classdef (inherits nothing, inherits once, inherits
   many) don't parse the same in the CST, so we look at ALL classes for
   a __metaclass__ and, if we find one, normalize the inherits to all be
   an arglist.
For one-liner classes ('class X: pass') there is no indent/dedent so
we normalize those into having a suite.
Moving the __metaclass__ into the classdef can also cause the class
body to be empty so there is some special casing for that as well.
This fixer also tries very hard to keep original indenting and spacing
in all those corner cases.
"""
# Author: Jack Diederich
# Local imports
from .. import fixer_base
from ..pygram import token
from ..fixer_util import Name, syms, Node, Leaf
def has_metaclass(parent):
""" we have to check the cls_node without changing it.
        There are two possibilities:
1) clsdef => suite => simple_stmt => expr_stmt => Leaf('__meta')
2) clsdef => simple_stmt => expr_stmt => Leaf('__meta')
"""
for node in parent.children:
if node.type == syms.suite:
return has_metaclass(node)
elif node.type == syms.simple_stmt and node.children:
expr_node = node.children[0]
if expr_node.type == syms.expr_stmt and expr_node.children:
left_side = expr_node.children[0]
if isinstance(left_side, Leaf) and \
left_side.value == '__metaclass__':
return True
return False
def fixup_parse_tree(cls_node):
""" one-line classes don't get a suite in the parse tree so we add
one to normalize the tree
"""
for node in cls_node.children:
if node.type == syms.suite:
# already in the preferred format, do nothing
return
# !%@#! oneliners have no suite node, we have to fake one up
for i, node in enumerate(cls_node.children):
if node.type == token.COLON:
break
else:
raise ValueError("No class suite and no ':'!")
# move everything into a suite node
suite = Node(syms.suite, [])
while cls_node.children[i+1:]:
move_node = cls_node.children[i+1]
suite.append_child(move_node.clone())
move_node.remove()
cls_node.append_child(suite)
node = suite
def fixup_simple_stmt(parent, i, stmt_node):
""" if there is a semi-colon all the parts count as part of the same
simple_stmt. We just want the __metaclass__ part so we move
everything after the semi-colon into its own simple_stmt node
"""
for semi_ind, node in enumerate(stmt_node.children):
if node.type == token.SEMI: # *sigh*
break
else:
return
node.remove() # kill the semicolon
new_expr = Node(syms.expr_stmt, [])
new_stmt = Node(syms.simple_stmt, [new_expr])
while stmt_node.children[semi_ind:]:
move_node = stmt_node.children[semi_ind]
new_expr.append_child(move_node.clone())
move_node.remove()
parent.insert_child(i, new_stmt)
new_leaf1 = new_stmt.children[0].children[0]
old_leaf1 = stmt_node.children[0].children[0]
new_leaf1.prefix = old_leaf1.prefix
def remove_trailing_newline(node):
if node.children and node.children[-1].type == token.NEWLINE:
node.children[-1].remove()
def find_metas(cls_node):
# find the suite node (Mmm, sweet nodes)
for node in cls_node.children:
if node.type == syms.suite:
break
else:
raise ValueError("No class suite!")
# look for simple_stmt[ expr_stmt[ Leaf('__metaclass__') ] ]
for i, simple_node in list(enumerate(node.children)):
if simple_node.type == syms.simple_stmt and simple_node.children:
expr_node = simple_node.children[0]
if expr_node.type == syms.expr_stmt and expr_node.children:
# Check if the expr_node is a simple assignment.
left_node = expr_node.children[0]
if isinstance(left_node, Leaf) and \
left_node.value == u'__metaclass__':
                    # We found an assignment to __metaclass__.
fixup_simple_stmt(node, i, simple_node)
remove_trailing_newline(simple_node)
yield (node, i, simple_node)
def fixup_indent(suite):
""" If an INDENT is followed by a thing with a prefix then nuke the prefix
Otherwise we get in trouble when removing __metaclass__ at suite start
"""
kids = suite.children[::-1]
# find the first indent
while kids:
node = kids.pop()
if node.type == token.INDENT:
break
# find the first Leaf
while kids:
node = kids.pop()
if isinstance(node, Leaf) and node.type != token.DEDENT:
if node.prefix:
node.prefix = u''
return
else:
kids.extend(node.children[::-1])
class FixMetaclass(fixer_base.BaseFix):
BM_compatible = True
PATTERN = """
classdef<any*>
"""
def transform(self, node, results):
if not has_metaclass(node):
return
fixup_parse_tree(node)
# find metaclasses, keep the last one
last_metaclass = None
for suite, i, stmt in find_metas(node):
last_metaclass = stmt
stmt.remove()
text_type = node.children[0].type # always Leaf(nnn, 'class')
# figure out what kind of classdef we have
if len(node.children) == 7:
# Node(classdef, ['class', 'name', '(', arglist, ')', ':', suite])
# 0 1 2 3 4 5 6
if node.children[3].type == syms.arglist:
arglist = node.children[3]
# Node(classdef, ['class', 'name', '(', 'Parent', ')', ':', suite])
else:
parent = node.children[3].clone()
arglist = Node(syms.arglist, [parent])
node.set_child(3, arglist)
elif len(node.children) == 6:
# Node(classdef, ['class', 'name', '(', ')', ':', suite])
# 0 1 2 3 4 5
arglist = Node(syms.arglist, [])
node.insert_child(3, arglist)
elif len(node.children) == 4:
# Node(classdef, ['class', 'name', ':', suite])
# 0 1 2 3
arglist = Node(syms.arglist, [])
node.insert_child(2, Leaf(token.RPAR, u')'))
node.insert_child(2, arglist)
node.insert_child(2, Leaf(token.LPAR, u'('))
else:
raise ValueError("Unexpected class definition")
# now stick the metaclass in the arglist
meta_txt = last_metaclass.children[0].children[0]
meta_txt.value = 'metaclass'
orig_meta_prefix = meta_txt.prefix
if arglist.children:
arglist.append_child(Leaf(token.COMMA, u','))
meta_txt.prefix = u' '
else:
meta_txt.prefix = u''
# compact the expression "metaclass = Meta" -> "metaclass=Meta"
expr_stmt = last_metaclass.children[0]
assert expr_stmt.type == syms.expr_stmt
expr_stmt.children[1].prefix = u''
expr_stmt.children[2].prefix = u''
arglist.append_child(last_metaclass)
fixup_indent(suite)
# check for empty suite
if not suite.children:
            # one-liner that was just __metaclass__
suite.remove()
pass_leaf = Leaf(text_type, u'pass')
pass_leaf.prefix = orig_meta_prefix
node.append_child(pass_leaf)
node.append_child(Leaf(token.NEWLINE, u'\n'))
elif len(suite.children) > 1 and \
(suite.children[-2].type == token.INDENT and
suite.children[-1].type == token.DEDENT):
# there was only one line in the class body and it was __metaclass__
pass_leaf = Leaf(text_type, u'pass')
suite.insert_child(-1, pass_leaf)
suite.insert_child(-1, Leaf(token.NEWLINE, u'\n'))
| mit |
nvoron23/hue | desktop/core/ext-py/tablib-develop/tablib/packages/openpyxl/reader/excel.py | 61 | 4259 | # file openpyxl/reader/excel.py
# Copyright (c) 2010 openpyxl
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# @license: http://www.opensource.org/licenses/mit-license.php
# @author: Eric Gazoni
"""Read an xlsx file into Python"""
# Python stdlib imports
from zipfile import ZipFile, ZIP_DEFLATED, BadZipfile
# package imports
from ..shared.exc import OpenModeError, InvalidFileException
from ..shared.ooxml import ARC_SHARED_STRINGS, ARC_CORE, ARC_APP, \
ARC_WORKBOOK, PACKAGE_WORKSHEETS, ARC_STYLE
from ..workbook import Workbook
from ..reader.strings import read_string_table
from ..reader.style import read_style_table
from ..reader.workbook import read_sheets_titles, read_named_ranges, \
read_properties_core, get_sheet_ids
from ..reader.worksheet import read_worksheet
from ..reader.iter_worksheet import unpack_worksheet
def load_workbook(filename, use_iterators = False):
"""Open the given filename and return the workbook
:param filename: the path to open
:type filename: string
:param use_iterators: use lazy load for cells
:type use_iterators: bool
:rtype: :class:`openpyxl.workbook.Workbook`
.. note::
When using lazy load, all worksheets will be :class:`openpyxl.reader.iter_worksheet.IterableWorksheet`
and the returned workbook will be read-only.
"""
if isinstance(filename, file):
# fileobject must have been opened with 'rb' flag
# it is required by zipfile
if 'b' not in filename.mode:
raise OpenModeError("File-object must be opened in binary mode")
try:
archive = ZipFile(filename, 'r', ZIP_DEFLATED)
except (BadZipfile, RuntimeError, IOError, ValueError):
raise InvalidFileException()
wb = Workbook()
if use_iterators:
wb._set_optimized_read()
try:
_load_workbook(wb, archive, filename, use_iterators)
except KeyError:
raise InvalidFileException()
finally:
archive.close()
return wb
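# Usage sketch (added; 'report.xlsx' is a hypothetical path):
#
#     wb = load_workbook('report.xlsx')
#     ws = wb.worksheets[0]
#
# With use_iterators=True the returned workbook is read-only and each sheet
# is an IterableWorksheet, as the docstring above notes.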
def _load_workbook(wb, archive, filename, use_iterators):
# get workbook-level information
wb.properties = read_properties_core(archive.read(ARC_CORE))
try:
string_table = read_string_table(archive.read(ARC_SHARED_STRINGS))
except KeyError:
string_table = {}
style_table = read_style_table(archive.read(ARC_STYLE))
# get worksheets
wb.worksheets = [] # remove preset worksheet
sheet_names = read_sheets_titles(archive.read(ARC_APP))
for i, sheet_name in enumerate(sheet_names):
sheet_codename = 'sheet%d.xml' % (i + 1)
worksheet_path = '%s/%s' % (PACKAGE_WORKSHEETS, sheet_codename)
if not use_iterators:
new_ws = read_worksheet(archive.read(worksheet_path), wb, sheet_name, string_table, style_table)
else:
xml_source = unpack_worksheet(archive, worksheet_path)
new_ws = read_worksheet(xml_source, wb, sheet_name, string_table, style_table, filename, sheet_codename)
#new_ws = read_worksheet(archive.read(worksheet_path), wb, sheet_name, string_table, style_table, filename, sheet_codename)
wb.add_sheet(new_ws, index = i)
wb._named_ranges = read_named_ranges(archive.read(ARC_WORKBOOK), wb)
| apache-2.0 |
SoreGums/bitcoinxt | contrib/seeds/makeseeds.py | 188 | 5574 | #!/usr/bin/env python
#
# Generate seeds.txt from Pieter's DNS seeder
#
NSEEDS=512
MAX_SEEDS_PER_ASN=2
MIN_BLOCKS = 337600
# These are hosts that have been observed to be behaving strangely (e.g.
# aggressively connecting to every node).
SUSPICIOUS_HOSTS = set([
"130.211.129.106", "178.63.107.226",
"83.81.130.26", "88.198.17.7", "148.251.238.178", "176.9.46.6",
"54.173.72.127", "54.174.10.182", "54.183.64.54", "54.194.231.211",
"54.66.214.167", "54.66.220.137", "54.67.33.14", "54.77.251.214",
"54.94.195.96", "54.94.200.247"
])
import re
import sys
import dns.resolver
import collections
PATTERN_IPV4 = re.compile(r"^((\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})):(\d+)$")
PATTERN_IPV6 = re.compile(r"^\[([0-9a-z:]+)\]:(\d+)$")
PATTERN_ONION = re.compile(r"^([abcdefghijklmnopqrstuvwxyz234567]{16}\.onion):(\d+)$")
PATTERN_AGENT = re.compile(r"^(\/Satoshi:0\.8\.6\/|\/Satoshi:0\.9\.(2|3|4|5)\/|\/Satoshi:0\.10\.\d{1,2}\/|\/Satoshi:0\.11\.\d{1,2}\/)$")
def parseline(line):
sline = line.split()
if len(sline) < 11:
return None
m = PATTERN_IPV4.match(sline[0])
sortkey = None
ip = None
if m is None:
m = PATTERN_IPV6.match(sline[0])
if m is None:
m = PATTERN_ONION.match(sline[0])
if m is None:
return None
else:
net = 'onion'
ipstr = sortkey = m.group(1)
port = int(m.group(2))
else:
net = 'ipv6'
if m.group(1) in ['::']: # Not interested in localhost
return None
ipstr = m.group(1)
sortkey = ipstr # XXX parse IPv6 into number, could use name_to_ipv6 from generate-seeds
port = int(m.group(2))
else:
# Do IPv4 sanity check
ip = 0
for i in range(0,4):
if int(m.group(i+2)) < 0 or int(m.group(i+2)) > 255:
return None
ip = ip + (int(m.group(i+2)) << (8*(3-i)))
if ip == 0:
return None
net = 'ipv4'
sortkey = ip
ipstr = m.group(1)
port = int(m.group(6))
# Skip bad results.
    if int(sline[1]) == 0:
return None
# Extract uptime %.
uptime30 = float(sline[7][:-1])
# Extract Unix timestamp of last success.
lastsuccess = int(sline[2])
# Extract protocol version.
version = int(sline[10])
# Extract user agent.
agent = sline[11][1:-1]
# Extract service flags.
service = int(sline[9], 16)
# Extract blocks.
blocks = int(sline[8])
# Construct result.
return {
'net': net,
'ip': ipstr,
'port': port,
'ipnum': ip,
'uptime': uptime30,
'lastsuccess': lastsuccess,
'version': version,
'agent': agent,
'service': service,
'blocks': blocks,
'sortkey': sortkey,
}
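# Sketch of a parsed record (added; field values are illustrative only):
#
#     {'net': 'ipv4', 'ip': '192.0.2.1', 'port': 8333, 'ipnum': 3221225985,
#      'uptime': 97.5, 'lastsuccess': 1420000000, 'version': 70002,
#      'agent': '/Satoshi:0.10.0/', 'service': 1, 'blocks': 340000,
#      'sortkey': 3221225985}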
def filtermultiport(ips):
    '''Filter out hosts that have more than one node on the same IP'''
hist = collections.defaultdict(list)
for ip in ips:
hist[ip['sortkey']].append(ip)
return [value[0] for (key,value) in hist.items() if len(value)==1]
# Based on Greg Maxwell's seed_filter.py
def filterbyasn(ips, max_per_asn, max_total):
# Sift out ips by type
ips_ipv4 = [ip for ip in ips if ip['net'] == 'ipv4']
ips_ipv6 = [ip for ip in ips if ip['net'] == 'ipv6']
ips_onion = [ip for ip in ips if ip['net'] == 'onion']
# Filter IPv4 by ASN
result = []
asn_count = {}
for ip in ips_ipv4:
if len(result) == max_total:
break
try:
asn = int([x.to_text() for x in dns.resolver.query('.'.join(reversed(ip['ip'].split('.'))) + '.origin.asn.cymru.com', 'TXT').response.answer][0].split('\"')[1].split(' ')[0])
if asn not in asn_count:
asn_count[asn] = 0
if asn_count[asn] == max_per_asn:
continue
asn_count[asn] += 1
result.append(ip)
except:
sys.stderr.write('ERR: Could not resolve ASN for "' + ip['ip'] + '"\n')
# TODO: filter IPv6 by ASN
# Add back non-IPv4
result.extend(ips_ipv6)
result.extend(ips_onion)
return result
def main():
lines = sys.stdin.readlines()
ips = [parseline(line) for line in lines]
    # Keep only entries with a valid address.
ips = [ip for ip in ips if ip is not None]
# Skip entries from suspicious hosts.
ips = [ip for ip in ips if ip['ip'] not in SUSPICIOUS_HOSTS]
# Enforce minimal number of blocks.
ips = [ip for ip in ips if ip['blocks'] >= MIN_BLOCKS]
# Require service bit 1.
ips = [ip for ip in ips if (ip['service'] & 1) == 1]
# Require at least 50% 30-day uptime.
ips = [ip for ip in ips if ip['uptime'] > 50]
# Require a known and recent user agent.
ips = [ip for ip in ips if PATTERN_AGENT.match(ip['agent'])]
# Sort by availability (and use last success as tie breaker)
ips.sort(key=lambda x: (x['uptime'], x['lastsuccess'], x['ip']), reverse=True)
# Filter out hosts with multiple bitcoin ports, these are likely abusive
ips = filtermultiport(ips)
# Look up ASNs and limit results, both per ASN and globally.
ips = filterbyasn(ips, MAX_SEEDS_PER_ASN, NSEEDS)
# Sort the results by IP address (for deterministic output).
ips.sort(key=lambda x: (x['net'], x['sortkey']))
for ip in ips:
if ip['net'] == 'ipv6':
print '[%s]:%i' % (ip['ip'], ip['port'])
else:
print '%s:%i' % (ip['ip'], ip['port'])
if __name__ == '__main__':
main()
| mit |
andrewcmyers/tensorflow | tensorflow/contrib/linalg/python/kernel_tests/linear_operator_full_matrix_test.py | 41 | 9729 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib import linalg as linalg_lib
from tensorflow.contrib.linalg.python.ops import linear_operator_test_util
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
linalg = linalg_lib
random_seed.set_random_seed(23)
class SquareLinearOperatorFullMatrixTest(
linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
"""Most tests done in the base class LinearOperatorDerivedClassTest."""
def _operator_and_mat_and_feed_dict(self, shape, dtype, use_placeholder):
shape = list(shape)
matrix = linear_operator_test_util.random_positive_definite_matrix(shape,
dtype)
if use_placeholder:
matrix_ph = array_ops.placeholder(dtype=dtype)
# Evaluate here because (i) you cannot feed a tensor, and (ii)
# values are random and we want the same value used for both mat and
# feed_dict.
matrix = matrix.eval()
operator = linalg.LinearOperatorFullMatrix(matrix_ph, is_square=True)
feed_dict = {matrix_ph: matrix}
else:
# is_square should be auto-detected here.
operator = linalg.LinearOperatorFullMatrix(matrix)
feed_dict = None
# Convert back to Tensor. Needed if use_placeholder, since then we have
# already evaluated matrix to a numpy array.
mat = ops.convert_to_tensor(matrix)
return operator, mat, feed_dict
def test_is_x_flags(self):
# Matrix with two positive eigenvalues.
matrix = [[1., 0.], [1., 11.]]
operator = linalg.LinearOperatorFullMatrix(
matrix,
is_positive_definite=True,
is_non_singular=True,
is_self_adjoint=False)
self.assertTrue(operator.is_positive_definite)
self.assertTrue(operator.is_non_singular)
self.assertFalse(operator.is_self_adjoint)
# Auto-detected.
self.assertTrue(operator.is_square)
def test_assert_non_singular_raises_if_cond_too_big_but_finite(self):
with self.test_session():
tril = linear_operator_test_util.random_tril_matrix(
shape=(50, 50), dtype=np.float32)
diag = np.logspace(-2, 2, 50).astype(np.float32)
tril = array_ops.matrix_set_diag(tril, diag)
matrix = math_ops.matmul(tril, tril, transpose_b=True).eval()
operator = linalg.LinearOperatorFullMatrix(matrix)
with self.assertRaisesOpError("Singular matrix"):
# Ensure that we have finite condition number...just HUGE.
cond = np.linalg.cond(matrix)
self.assertTrue(np.isfinite(cond))
self.assertGreater(cond, 1e12)
operator.assert_non_singular().run()
def test_assert_non_singular_raises_if_cond_infinite(self):
with self.test_session():
matrix = [[1., 1.], [1., 1.]]
# We don't pass the is_self_adjoint hint here, which means we take the
# generic code path.
operator = linalg.LinearOperatorFullMatrix(matrix)
with self.assertRaisesOpError("Singular matrix"):
operator.assert_non_singular().run()
def test_assert_self_adjoint(self):
matrix = [[0., 1.], [0., 1.]]
operator = linalg.LinearOperatorFullMatrix(matrix)
with self.test_session():
with self.assertRaisesOpError("not equal to its adjoint"):
operator.assert_self_adjoint().run()
def test_assert_positive_definite(self):
matrix = [[1., 1.], [1., 1.]]
operator = linalg.LinearOperatorFullMatrix(matrix, is_self_adjoint=True)
with self.test_session():
with self.assertRaisesOpError("Cholesky decomposition was not success"):
operator.assert_positive_definite().run()
class SquareLinearOperatorFullMatrixSymmetricPositiveDefiniteTest(
linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
"""Most tests done in the base class LinearOperatorDerivedClassTest.
In this test, the operator is constructed with hints that invoke the use of
a Cholesky decomposition for solves/determinant.
"""
def setUp(self):
    # Increase atol/rtol from 1e-6 to 1e-5. This looser tolerance is needed,
    # presumably, because we are taking a different code path in the operator
    # and the matrix. The operator uses a Cholesky, the matrix uses a
    # standard solve.
self._atol[dtypes.float32] = 1e-5
self._rtol[dtypes.float32] = 1e-5
self._atol[dtypes.float64] = 1e-10
self._rtol[dtypes.float64] = 1e-10
@property
def _dtypes_to_test(self):
return [dtypes.float32, dtypes.float64]
def _operator_and_mat_and_feed_dict(self, shape, dtype, use_placeholder):
shape = list(shape)
matrix = linear_operator_test_util.random_positive_definite_matrix(
shape, dtype, force_well_conditioned=True)
if use_placeholder:
matrix_ph = array_ops.placeholder(dtype=dtype)
# Evaluate here because (i) you cannot feed a tensor, and (ii)
# values are random and we want the same value used for both mat and
# feed_dict.
matrix = matrix.eval()
# is_square is auto-set because of self_adjoint/pd.
operator = linalg.LinearOperatorFullMatrix(
matrix_ph, is_self_adjoint=True, is_positive_definite=True)
feed_dict = {matrix_ph: matrix}
else:
operator = linalg.LinearOperatorFullMatrix(
matrix, is_self_adjoint=True, is_positive_definite=True)
feed_dict = None
# Convert back to Tensor. Needed if use_placeholder, since then we have
# already evaluated matrix to a numpy array.
mat = ops.convert_to_tensor(matrix)
return operator, mat, feed_dict
def test_is_x_flags(self):
# Matrix with two positive eigenvalues.
matrix = [[1., 0.], [0., 7.]]
operator = linalg.LinearOperatorFullMatrix(
matrix, is_positive_definite=True, is_self_adjoint=True)
self.assertTrue(operator.is_positive_definite)
self.assertTrue(operator.is_self_adjoint)
# Should be auto-set
self.assertTrue(operator.is_non_singular)
self.assertTrue(operator._can_use_cholesky)
self.assertTrue(operator.is_square)
def test_assert_non_singular(self):
matrix = [[1., 1.], [1., 1.]]
operator = linalg.LinearOperatorFullMatrix(
matrix, is_self_adjoint=True, is_positive_definite=True)
with self.test_session():
# Cholesky decomposition may fail, so the error is not specific to
# non-singular.
with self.assertRaisesOpError(""):
operator.assert_non_singular().run()
def test_assert_self_adjoint(self):
matrix = [[0., 1.], [0., 1.]]
operator = linalg.LinearOperatorFullMatrix(
matrix, is_self_adjoint=True, is_positive_definite=True)
with self.test_session():
with self.assertRaisesOpError("not equal to its adjoint"):
operator.assert_self_adjoint().run()
def test_assert_positive_definite(self):
matrix = [[1., 1.], [1., 1.]]
operator = linalg.LinearOperatorFullMatrix(
matrix, is_self_adjoint=True, is_positive_definite=True)
with self.test_session():
# Cholesky decomposition may fail, so the error is not specific to
# non-singular.
with self.assertRaisesOpError(""):
operator.assert_positive_definite().run()
class NonSquareLinearOperatorFullMatrixTest(
linear_operator_test_util.NonSquareLinearOperatorDerivedClassTest):
"""Most tests done in the base class LinearOperatorDerivedClassTest."""
def _operator_and_mat_and_feed_dict(self, shape, dtype, use_placeholder):
matrix = linear_operator_test_util.random_normal(shape, dtype=dtype)
if use_placeholder:
matrix_ph = array_ops.placeholder(dtype=dtype)
# Evaluate here because (i) you cannot feed a tensor, and (ii)
# values are random and we want the same value used for both mat and
# feed_dict.
matrix = matrix.eval()
operator = linalg.LinearOperatorFullMatrix(matrix_ph)
feed_dict = {matrix_ph: matrix}
else:
operator = linalg.LinearOperatorFullMatrix(matrix)
feed_dict = None
# Convert back to Tensor. Needed if use_placeholder, since then we have
# already evaluated matrix to a numpy array.
mat = ops.convert_to_tensor(matrix)
return operator, mat, feed_dict
def test_is_x_flags(self):
matrix = [[3., 2., 1.], [1., 1., 1.]]
operator = linalg.LinearOperatorFullMatrix(
matrix,
is_self_adjoint=False)
self.assertEqual(operator.is_positive_definite, None)
self.assertEqual(operator.is_non_singular, None)
self.assertFalse(operator.is_self_adjoint)
self.assertFalse(operator.is_square)
def test_matrix_must_have_at_least_two_dims_or_raises(self):
with self.assertRaisesRegexp(ValueError, "at least 2 dimensions"):
linalg.LinearOperatorFullMatrix([1.])
if __name__ == "__main__":
test.main()
| apache-2.0 |
jabesq/home-assistant | homeassistant/components/tuya/switch.py | 7 | 1134 | """Support for Tuya switches."""
from homeassistant.components.switch import ENTITY_ID_FORMAT, SwitchDevice
from . import DATA_TUYA, TuyaDevice
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up Tuya Switch device."""
if discovery_info is None:
return
tuya = hass.data[DATA_TUYA]
dev_ids = discovery_info.get('dev_ids')
devices = []
for dev_id in dev_ids:
device = tuya.get_device_by_id(dev_id)
if device is None:
continue
devices.append(TuyaSwitch(device))
add_entities(devices)
class TuyaSwitch(TuyaDevice, SwitchDevice):
"""Tuya Switch Device."""
def __init__(self, tuya):
"""Init Tuya switch device."""
super().__init__(tuya)
self.entity_id = ENTITY_ID_FORMAT.format(tuya.object_id())
@property
def is_on(self):
"""Return true if switch is on."""
return self.tuya.state()
def turn_on(self, **kwargs):
"""Turn the switch on."""
self.tuya.turn_on()
def turn_off(self, **kwargs):
"""Turn the device off."""
self.tuya.turn_off()
| apache-2.0 |
syhpoon/xyzcmd | libxyz/ui/cmd.py | 1 | 26667 | # -*- coding: utf-8 -*-
#
# Max E. Kuznecov ~syhpoon <[email protected]> 2008
#
# This file is part of XYZCommander.
# XYZCommander is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# XYZCommander is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
# You should have received a copy of the GNU Lesser Public License
# along with XYZCommander. If not, see <http://www.gnu.org/licenses/>.
import copy
import traceback
import re
import libxyz.core
from libxyz.ui import lowui
from libxyz.ui import Prompt
from libxyz.ui import XYZListBox
from libxyz.ui import NumEntry
from libxyz.ui import Keys
from libxyz.ui.utils import refresh
from libxyz.core.utils import ustring, bstring, is_func, split_cmd
from libxyz.core.dsl import XYZ
from libxyz.exceptions import XYZRuntimeError
class Cmd(lowui.FlowWidget):
"""
Command line widget
"""
resolution = (u"cmd",)
LEFT = u"left"
RIGHT = u"right"
END = u"end"
UNDER = u"under"
def __init__(self, xyz):
"""
@param xyz: XYZData instance
Resources used: text, prompt
"""
super(Cmd, self).__init__()
self.xyz = xyz
self._default_attr = lambda x: xyz.skin.attr(self.resolution, x)
self._attr = self._default_attr
self._keys = Keys()
self._data = []
# Internal cursor index. Value is in range(0,len(self._data))
self._index = 0
# Virtual cursor index. Value is in range(0,maxcol)
self._vindex = 0
self._hindex = 0
self.context = None
self._panel = self.xyz.pm.load(":sys:panel")
self._plugin = self._init_plugin()
self._ud = libxyz.core.UserData()
self._history_file = "history"
_conf = self._plugin.conf
self.prompt = Prompt(_conf[u"prompt"], self._attr(u"prompt"))
self._undo = libxyz.core.Queue(_conf[u"undo_depth"])
self._history = libxyz.core.Queue(_conf[u"history_depth"])
self.xyz.hm.register("event:conf_update", self._update_conf_hook)
self.xyz.hm.register("event:startup", self._load_history_hook)
self.xyz.hm.register("event:shutdown", self._save_history_hook)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def _update_conf_hook(self, var, val, sect):
"""
Hook for update conf event
"""
# Not ours
if sect != "plugins" or var != self._plugin.ns.pfull:
return
mapping = {
"prompt": lambda x: self._set_prompt(x),
"undo_depth": lambda x: self._undo.set_size(x),
"history_depth": lambda x: self._history.set_size(x),
}
for k, v in val.iteritems():
if k in mapping:
mapping[k](v)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def _save_history_hook(self):
"""
Save history at shutdown
"""
f = None
try:
f = self._ud.openfile(self._history_file, "w", "data")
f.write("\n".join([bstring(u"".join(x)) for x in self._history]))
except XYZRuntimeError, e:
if f:
f.close()
xyzlog.info(_(u"Unable to open history data file: %s")
% unicode(e))
else:
if f:
f.close()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def _load_history_hook(self):
"""
Load history at startup
"""
f = None
try:
f = self._ud.openfile(self._history_file, "r", "data")
data = f.readlines()
if len(data) > self._history.maxsize:
                data = data[-self._history.maxsize:]
self._history.clear()
for line in data:
self._history.push([x for x in ustring(line.rstrip())])
except Exception:
pass
if f:
f.close()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def _init_plugin(self):
"""
Init virtual plugin
"""
_cmd_plugin = libxyz.core.plugins.VirtualPlugin(self.xyz, u"cmd")
_cmd_plugin.AUTHOR = u"Max E. Kuznecov <[email protected]>"
_cmd_plugin.VERSION = u"0.1"
_cmd_plugin.BRIEF_DESCRIPTION = _(u"Command line plugin")
_cmd_plugin.FULL_DESCRIPTION = _(u"Command line plugin. "\
u"It allows to enter, edit and "\
u"execute commands.")
_cmd_plugin.DOC = _(u"Configuration variables:\n"\
u"undo_depth - Specifies how many undo levels to "\
u"keep. Default - 10\n"\
u"history_depth - Specifies how many entered "\
u"commands to keep. Default - 50\n"\
u"prompt - Command line prompt. Default - '$ '")
_cmd_plugin.export(self.del_char)
_cmd_plugin.export(self.del_char_left)
_cmd_plugin.export(self.del_word_left)
_cmd_plugin.export(self.del_word_right)
_cmd_plugin.export(self.clear)
_cmd_plugin.export(self.clear_left)
_cmd_plugin.export(self.clear_right)
_cmd_plugin.export(self.cursor_begin)
_cmd_plugin.export(self.cursor_end)
_cmd_plugin.export(self.cursor_left)
_cmd_plugin.export(self.cursor_right)
_cmd_plugin.export(self.cursor_word_left)
_cmd_plugin.export(self.cursor_word_right)
_cmd_plugin.export(self.is_empty)
_cmd_plugin.export(self.undo)
_cmd_plugin.export(self.undo_clear)
_cmd_plugin.export(self.execute)
_cmd_plugin.export(self.history_prev)
_cmd_plugin.export(self.history_next)
_cmd_plugin.export(self.history_clear)
_cmd_plugin.export(self.show_history)
_cmd_plugin.export(self.put_active_object)
_cmd_plugin.export(self.put_active_object_path)
_cmd_plugin.export(self.put_inactive_object)
_cmd_plugin.export(self.put_inactive_object_path)
_cmd_plugin.export(self.put_active_cwd)
_cmd_plugin.export(self.put_inactive_cwd)
_cmd_plugin.export(self.put)
_cmd_plugin.export(self.get)
_cmd_plugin.export(self.append)
_cmd_plugin.export(self.escape)
_cmd_plugin.export(self.replace_aliases)
_cmd_plugin.export(self.get_attr_f)
_cmd_plugin.export(self.set_attr_f)
self.xyz.pm.register(_cmd_plugin)
self.context = _cmd_plugin.ns.pfull
return _cmd_plugin
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def selectable(self):
return True
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def rows(self, (maxcol,), focus=False):
"""
Return the number of lines that will be rendered
"""
return 1
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def render(self, (maxcol,), focus=False):
"""
Render the command line
"""
text_attr = self._attr("text")
if self.prompt is not None:
_canv_prompt = self.prompt.render((maxcol,))
_prompt_len = len(self.prompt)
else:
_canv_prompt = lowui.Text(u"").render((maxcol,))
_prompt_len = 0
_data = [bstring(x) for x in self._get_visible(maxcol)]
_text_len = abs(maxcol - _prompt_len)
_canv_text = lowui.AttrWrap(lowui.Text("".join(_data)),
text_attr).render((maxcol,))
_canvases = []
if _prompt_len > 0:
_canvases.append((_canv_prompt, None, False, _prompt_len))
_canvases.append((_canv_text, 0, True, _text_len))
canv = lowui.CanvasJoin(_canvases)
canv.cursor = self.get_cursor_coords((maxcol,))
return canv
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def _get_visible(self, maxcol):
"""
Calculate and return currently visible piece of cmd data
"""
maxcol -= 1
_plen = len(self.prompt)
_dlen = len(self._data)
_xindex = _plen + self._index
if self._vindex >= maxcol:
self._vindex = maxcol - 1
if _plen + _dlen >= maxcol:
_off = _xindex - maxcol
_to = _xindex
if _off < 0:
_off = 0
_to = maxcol - _plen + 1
_data = self._data[_off:_to]
else:
_data = self._data
return _data
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def get_cursor_coords(self, (maxcol,)):
"""
Return the (x,y) coordinates of cursor within widget.
"""
return len(self.prompt) + self._vindex, 0
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def _put_object(self, char):
self._data.insert(self._index, ustring(char))
self._index += 1
self._vindex += 1
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def keypress(self, size, key):
"""
Process pressed key
"""
_meth = self.xyz.km.process(key)
if _meth is not None:
return _meth()
else:
_good = [x for x in key if len(x) == 1]
if _good:
try:
map(lambda x: self._put_object(x), _good)
except Exception, e:
xyzlog.error(unicode(e))
xyzlog.debug(ustring(traceback.format_exc()))
else:
self._invalidate()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def _save_undo(self):
"""
Save undo data
"""
self._undo.push((self._index, copy.copy(self._data)))
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def _restore_undo(self):
"""
Restore one undo level
"""
if self._undo:
self._index, self._data = self._undo.pop()
self._vindex = self._index
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def _save_history(self):
"""
Save typed command history
"""
# Prevent duplicating entries
        if self._history.tail() != self._data:
self._history.push(copy.copy(self._data))
self._hindex = len(self._history)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def _clear_cmd(self):
"""
Internal clear
"""
self._data = []
self._index = 0
self._vindex = 0
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def _move_cursor(self, direction, chars=None, topred=None):
"""
Generic cursor moving procedure
@param direction: LEFT or RIGHT
@param chars: Number of character to move or END to move to the end
in corresponding direction
@param topred: Predicate function which must return True if char
under the cursor is endpoint in move
"""
_newindex = None
# Using predicate
if callable(topred):
if direction == self.LEFT:
_range = range(self._index - 1, 0, -1)
else:
_range = range(self._index + 1, len(self._data))
for i in _range:
if topred(self._data[i]):
_newindex = i
break
if _newindex is None:
# To start or end, depending on direction
return self._move_cursor(direction, chars=self.END)
elif direction == self.LEFT:
if chars == self.END:
_newindex = 0
elif chars is not None and self._index >= chars:
_newindex = self._index - chars
elif direction == self.RIGHT:
if chars == self.END:
_newindex = len(self._data)
            elif chars is not None and (self._index + chars) <= len(self._data):
_newindex = self._index + chars
if _newindex is not None:
self._index = _newindex
self._vindex = _newindex
self._invalidate()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@refresh
def _delete(self, direction, chars=None, topred=None):
"""
Generic delete procedure
@param direction: LEFT, RIGHT or UNDER
@param chars: Number of characters to delete
@param topred: Predicate function which must return True if char
under the cursor is endpoint in delete
"""
_newindex = None
_delindex = None
_newdata = None
if callable(topred):
if direction == self.LEFT:
_range = range(self._index - 1, 0, -1)
else:
_range = range(self._index + 1, len(self._data))
_found = False
for i in _range:
if topred(self._data[i]):
_found = True
if direction == self.LEFT:
_newindex = i
_newdata = self._data[:_newindex] + \
self._data[self._index:]
else:
_newdata = self._data[:self._index] + self._data[i:]
self._save_undo()
break
if not _found:
return self._delete(direction, chars=self.END)
elif direction == self.UNDER:
if self._index >= 0 and self._index < len(self._data):
_delindex = self._index
elif direction == self.LEFT:
if chars == self.END:
self._save_undo()
_newdata = self._data[self._index:]
_newindex = 0
elif chars is not None and self._index >= chars:
_newindex = self._index - chars
_delindex = _newindex
elif direction == self.RIGHT:
if chars == self.END:
self._save_undo()
_newdata = self._data[:self._index]
if _newindex is not None:
self._index = _newindex
self._vindex = _newindex
if _newdata is not None:
self._data = _newdata
if _delindex is not None:
del(self._data[_delindex])
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Public methods
def del_char_left(self):
"""
Delete single character left to the cursor
"""
self._delete(self.LEFT, chars=1)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def del_char(self):
"""
Delete single character under the cursor
"""
return self._delete(self.UNDER)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def del_word_left(self):
"""
Delete a word left to the cursor
"""
return self._delete(self.LEFT, topred=lambda x: x.isspace())
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def del_word_right(self):
"""
Delete a word right to the cursor
"""
return self._delete(self.RIGHT, topred=lambda x: x.isspace())
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def clear(self):
"""
Clear the whole cmd line
"""
self._save_undo()
self._clear_cmd()
self._invalidate()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def clear_left(self):
"""
Clear the cmd line from the cursor to the left
"""
self._delete(self.LEFT, chars=self.END)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def clear_right(self):
"""
Clear the cmd line from the cursor to the right
"""
return self._delete(self.RIGHT, chars=self.END)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def cursor_begin(self):
"""
Move cursor to the beginning of the command line
"""
self._move_cursor(self.LEFT, chars=self.END)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def cursor_end(self):
"""
Move cursor to the end of the command line
"""
self._move_cursor(self.RIGHT, chars=self.END)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def cursor_left(self):
"""
Move cursor left
"""
self._move_cursor(self.LEFT, chars=1)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def cursor_right(self):
"""
Move cursor right
"""
self._move_cursor(self.RIGHT, chars=1)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def cursor_word_left(self):
"""
Move cursor one word left
"""
self._move_cursor(self.LEFT, topred=lambda x: x.isspace())
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def cursor_word_right(self):
"""
Move cursor one word right
"""
self._move_cursor(self.RIGHT, topred=lambda x: x.isspace())
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def execute(self):
"""
Execute cmd contents
"""
# We're inside non-local VFS, execution is not allowed
if XYZ.call(":sys:panel:vfs_driver"):
xyzlog.error(
_(u"Unable to execute commands on non-local filesystems"))
return
if not self._data:
return
_data = self.replace_aliases(bstring(u"".join(self._data)))
_cmd, _rest = _split_cmd(_data)
if _cmd:
self._save_history()
# Do not run shell, execute internal command
if _cmd in self.xyz.conf["commands"]:
try:
                    arg = _rest
self.xyz.conf["commands"][_cmd](arg)
except Exception, e:
xyzlog.error(_("Error executing internal command %s: %s") %
(_cmd, unicode(e)))
elif _cmd:
if not hasattr(self, "_execf"):
self._execf = self.xyz.pm.from_load(":core:shell", "execute")
if not hasattr(self, "_reloadf"):
self._reloadf = self.xyz.pm.from_load(":sys:panel",
"reload_all")
self._execf(_data)
self._reloadf()
self._clear_cmd()
self._invalidate()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def replace_aliases(self, data):
"""
Check if first word of the command line (which is supposed to be a
command to execute) is in our aliases table, if it is, replace it.
@param data: String
"""
cmd, _ = _split_cmd(data)
try:
raw_alias = self.xyz.conf["aliases"][cmd]
if isinstance(raw_alias, basestring):
alias = raw_alias
elif is_func(raw_alias):
alias = raw_alias()
else:
xyzlog.error(_(u"Invalid alias type: %s") %
ustring(str(type(raw_alias))))
return data
return re.sub(r"^%s" % cmd, alias, data)
except KeyError:
return data
except Exception, e:
xyzlog.error(_(u"Unable to replace an alias %s: %s") %
(ustring(cmd), unicode(e)))
return data
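    # Illustrative sketch (not part of the original module): with an
    # aliases table such as {"ll": "ls -l"}, only the leading command
    # word is rewritten by the re.sub() above:
    #
    #   replace_aliases("ll /tmp")  ->  "ls -l /tmp"
    #   replace_aliases("echo ll")  ->  "echo ll"  (no leading match)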
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def is_empty(self):
"""
Return True if cmd is empty, i.e. has no contents
"""
return self._data == []
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def undo(self):
"""
Restore one level from undo buffer
"""
self._restore_undo()
self._invalidate()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def undo_clear(self):
"""
Clear undo buffer
"""
self._undo.clear()
self._invalidate()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def history_prev(self):
"""
Scroll through list of saved commands backward
"""
if self._hindex > 0:
self._hindex -= 1
self._data = copy.copy(self._history[self._hindex])
self.cursor_end()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def history_next(self):
"""
Scroll through list of saved commands forward
"""
if self._hindex < len(self._history) - 1:
self._hindex += 1
self._data = copy.copy(self._history[self._hindex])
self.cursor_end()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def history_clear(self):
"""
Clear commands history
"""
self._history.clear()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def show_history(self):
"""
Show commands history list
"""
def _enter_cb(num):
if num >= len(self._history):
return
self._data = copy.copy(self._history[num])
self.cursor_end()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
_sel_attr = self.xyz.skin.attr(XYZListBox.resolution, u"selected")
_wdata = []
for i in range(len(self._history)):
_wdata.append(NumEntry(u"".join([ustring(x) for x in
self._history[i]]),
_sel_attr, i,
enter_cb=_enter_cb))
_walker = lowui.SimpleListWalker(_wdata)
_walker.focus = len(_walker) - 1
_dim = tuple([x - 2 for x in self.xyz.screen.get_cols_rows()])
_ek = [self._keys.ENTER]
XYZListBox(self.xyz, self.xyz.top, _walker, _(u"History"),
_dim).show(exit_keys=_ek)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def put_active_object(self):
"""
Put currently selected VFS object name in panel to cmd line
"""
return self._put_engine(self._panel.get_selected().name)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def put_active_object_path(self):
"""
Put currently selected VFS object full path in panel to cmd line
"""
return self._put_engine(self._panel.get_selected().full_path)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def put_inactive_object(self):
"""
Put selected VFS object name in inactive panel to cmd line
"""
return self._put_engine(self._panel.get_selected(False).name)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def put_inactive_object_path(self):
"""
Put selected VFS object full path in inactive panel to cmd line
"""
return self._put_engine(self._panel.get_selected(False).full_path)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def put_active_cwd(self):
"""
Put current working directory of active panel to cmd line
"""
return self._put_engine(self._panel.cwd())
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def put_inactive_cwd(self):
"""
Put current working directory of inactive panel to cmd line
"""
return self._put_engine(self._panel.cwd(False))
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def put(self, obj, space=True):
"""
Put arbitrary string to cmd line starting from the cursor position
@param space: Flag indicating whether to append space char after the obj
"""
return self._put_engine(obj, space=space)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def get(self):
"""
Get cmd contents
"""
return bstring(u"".join(self._data))
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def append(self, obj):
"""
Append arbitrary string at the end of cmd
"""
self.cursor_end()
self.put(obj, space=False)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def _put_engine(self, obj, space=True):
"""
Put list content to cmd
"""
if space:
extra = [u" "]
else:
extra = []
map(lambda x: self._put_object(x),
self.escape([x for x in ustring(obj)]) + extra)
self._invalidate()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def escape(self, obj, join=False):
"""
Escape filename
@param obj: String to escape
@param join: If False return list otherwise return joined string
"""
result = []
        toescape = [u" ", u"'", u'"', u"*", u"?", u"\\", u"&",
                    u"(", u")", u"[", u"]", u"{", u"}"]
if isinstance(obj, basestring):
obj = ustring(obj)
for x in obj:
if x in toescape:
result.extend([u"\\", x])
else:
result.append(x)
if join:
return u"".join(result)
else:
return result
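    # Illustrative sketch (not part of the original module), following the
    # toescape table above:
    #
    #   escape(u"my file(1)", join=True)  ->  u"my\ file\(1\)"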
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def _set_prompt(self, new):
"""
Set command line prompt
"""
self.prompt = Prompt(new, self._attr(u"prompt"))
self._invalidate()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def get_attr_f(self):
"""
Return current attributes function
"""
return self._attr
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def set_attr_f(self, f):
"""
Set attributes function
"""
self._attr = f
self._invalidate()
#++++++++++++++++++++++++++++++++++++++++++++++++
def _split_cmd(cmdline):
"""
Return command name and the rest of the command line
"""
_r = split_cmd(cmdline)
_len = len(_r)
if _len == 0:
return "", None
elif _len == 1:
return _r[0], None
else:
return _r[0], _r[1]
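# Illustrative sketch (not part of the original module), assuming
# libxyz.core.utils.split_cmd() separates the command name from the rest
# of the line:
#
#   _split_cmd("ls -la /tmp")  ->  ("ls", "-la /tmp")
#   _split_cmd("ls")           ->  ("ls", None)
#   _split_cmd("")             ->  ("", None)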
| gpl-3.0 |
2014c2g5/2014c2 | w2/static/Brython2.0.0-20140209-164925/Lib/unittest/test/test_assertions.py | 738 | 15398 | import datetime
import warnings
import unittest
from itertools import product
class Test_Assertions(unittest.TestCase):
def test_AlmostEqual(self):
self.assertAlmostEqual(1.00000001, 1.0)
self.assertNotAlmostEqual(1.0000001, 1.0)
self.assertRaises(self.failureException,
self.assertAlmostEqual, 1.0000001, 1.0)
self.assertRaises(self.failureException,
self.assertNotAlmostEqual, 1.00000001, 1.0)
self.assertAlmostEqual(1.1, 1.0, places=0)
self.assertRaises(self.failureException,
self.assertAlmostEqual, 1.1, 1.0, places=1)
self.assertAlmostEqual(0, .1+.1j, places=0)
self.assertNotAlmostEqual(0, .1+.1j, places=1)
self.assertRaises(self.failureException,
self.assertAlmostEqual, 0, .1+.1j, places=1)
self.assertRaises(self.failureException,
self.assertNotAlmostEqual, 0, .1+.1j, places=0)
self.assertAlmostEqual(float('inf'), float('inf'))
self.assertRaises(self.failureException, self.assertNotAlmostEqual,
float('inf'), float('inf'))
    def test_AlmostEqualWithDelta(self):
self.assertAlmostEqual(1.1, 1.0, delta=0.5)
self.assertAlmostEqual(1.0, 1.1, delta=0.5)
self.assertNotAlmostEqual(1.1, 1.0, delta=0.05)
self.assertNotAlmostEqual(1.0, 1.1, delta=0.05)
self.assertRaises(self.failureException, self.assertAlmostEqual,
1.1, 1.0, delta=0.05)
self.assertRaises(self.failureException, self.assertNotAlmostEqual,
1.1, 1.0, delta=0.5)
self.assertRaises(TypeError, self.assertAlmostEqual,
1.1, 1.0, places=2, delta=2)
self.assertRaises(TypeError, self.assertNotAlmostEqual,
1.1, 1.0, places=2, delta=2)
first = datetime.datetime.now()
second = first + datetime.timedelta(seconds=10)
self.assertAlmostEqual(first, second,
delta=datetime.timedelta(seconds=20))
self.assertNotAlmostEqual(first, second,
delta=datetime.timedelta(seconds=5))
def test_assertRaises(self):
def _raise(e):
raise e
self.assertRaises(KeyError, _raise, KeyError)
self.assertRaises(KeyError, _raise, KeyError("key"))
try:
self.assertRaises(KeyError, lambda: None)
except self.failureException as e:
self.assertIn("KeyError not raised", str(e))
else:
self.fail("assertRaises() didn't fail")
try:
self.assertRaises(KeyError, _raise, ValueError)
except ValueError:
pass
else:
self.fail("assertRaises() didn't let exception pass through")
with self.assertRaises(KeyError) as cm:
try:
raise KeyError
except Exception as e:
exc = e
raise
self.assertIs(cm.exception, exc)
with self.assertRaises(KeyError):
raise KeyError("key")
try:
with self.assertRaises(KeyError):
pass
except self.failureException as e:
self.assertIn("KeyError not raised", str(e))
else:
self.fail("assertRaises() didn't fail")
try:
with self.assertRaises(KeyError):
raise ValueError
except ValueError:
pass
else:
self.fail("assertRaises() didn't let exception pass through")
def testAssertNotRegex(self):
self.assertNotRegex('Ala ma kota', r'r+')
try:
self.assertNotRegex('Ala ma kota', r'k.t', 'Message')
except self.failureException as e:
self.assertIn("'kot'", e.args[0])
self.assertIn('Message', e.args[0])
else:
self.fail('assertNotRegex should have failed.')
class TestLongMessage(unittest.TestCase):
"""Test that the individual asserts honour longMessage.
This actually tests all the message behaviour for
asserts that use longMessage."""
def setUp(self):
class TestableTestFalse(unittest.TestCase):
longMessage = False
failureException = self.failureException
def testTest(self):
pass
class TestableTestTrue(unittest.TestCase):
longMessage = True
failureException = self.failureException
def testTest(self):
pass
self.testableTrue = TestableTestTrue('testTest')
self.testableFalse = TestableTestFalse('testTest')
def testDefault(self):
self.assertTrue(unittest.TestCase.longMessage)
def test_formatMsg(self):
self.assertEqual(self.testableFalse._formatMessage(None, "foo"), "foo")
self.assertEqual(self.testableFalse._formatMessage("foo", "bar"), "foo")
self.assertEqual(self.testableTrue._formatMessage(None, "foo"), "foo")
self.assertEqual(self.testableTrue._formatMessage("foo", "bar"), "bar : foo")
# This blows up if _formatMessage uses string concatenation
self.testableTrue._formatMessage(object(), 'foo')
def test_formatMessage_unicode_error(self):
one = ''.join(chr(i) for i in range(255))
# this used to cause a UnicodeDecodeError constructing msg
self.testableTrue._formatMessage(one, '\uFFFD')
def assertMessages(self, methodName, args, errors):
"""
Check that methodName(*args) raises the correct error messages.
errors should be a list of 4 regex that match the error when:
1) longMessage = False and no msg passed;
2) longMessage = False and msg passed;
3) longMessage = True and no msg passed;
4) longMessage = True and msg passed;
"""
def getMethod(i):
useTestableFalse = i < 2
if useTestableFalse:
test = self.testableFalse
else:
test = self.testableTrue
return getattr(test, methodName)
for i, expected_regex in enumerate(errors):
testMethod = getMethod(i)
kwargs = {}
withMsg = i % 2
if withMsg:
kwargs = {"msg": "oops"}
with self.assertRaisesRegex(self.failureException,
expected_regex=expected_regex):
testMethod(*args, **kwargs)
def testAssertTrue(self):
self.assertMessages('assertTrue', (False,),
["^False is not true$", "^oops$", "^False is not true$",
"^False is not true : oops$"])
def testAssertFalse(self):
self.assertMessages('assertFalse', (True,),
["^True is not false$", "^oops$", "^True is not false$",
"^True is not false : oops$"])
def testNotEqual(self):
self.assertMessages('assertNotEqual', (1, 1),
["^1 == 1$", "^oops$", "^1 == 1$",
"^1 == 1 : oops$"])
def testAlmostEqual(self):
self.assertMessages('assertAlmostEqual', (1, 2),
["^1 != 2 within 7 places$", "^oops$",
"^1 != 2 within 7 places$", "^1 != 2 within 7 places : oops$"])
def testNotAlmostEqual(self):
self.assertMessages('assertNotAlmostEqual', (1, 1),
["^1 == 1 within 7 places$", "^oops$",
"^1 == 1 within 7 places$", "^1 == 1 within 7 places : oops$"])
def test_baseAssertEqual(self):
self.assertMessages('_baseAssertEqual', (1, 2),
["^1 != 2$", "^oops$", "^1 != 2$", "^1 != 2 : oops$"])
def testAssertSequenceEqual(self):
# Error messages are multiline so not testing on full message
# assertTupleEqual and assertListEqual delegate to this method
self.assertMessages('assertSequenceEqual', ([], [None]),
["\+ \[None\]$", "^oops$", r"\+ \[None\]$",
r"\+ \[None\] : oops$"])
def testAssertSetEqual(self):
self.assertMessages('assertSetEqual', (set(), set([None])),
["None$", "^oops$", "None$",
"None : oops$"])
def testAssertIn(self):
self.assertMessages('assertIn', (None, []),
['^None not found in \[\]$', "^oops$",
'^None not found in \[\]$',
'^None not found in \[\] : oops$'])
def testAssertNotIn(self):
self.assertMessages('assertNotIn', (None, [None]),
['^None unexpectedly found in \[None\]$', "^oops$",
'^None unexpectedly found in \[None\]$',
'^None unexpectedly found in \[None\] : oops$'])
def testAssertDictEqual(self):
self.assertMessages('assertDictEqual', ({}, {'key': 'value'}),
[r"\+ \{'key': 'value'\}$", "^oops$",
"\+ \{'key': 'value'\}$",
"\+ \{'key': 'value'\} : oops$"])
def testAssertDictContainsSubset(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
self.assertMessages('assertDictContainsSubset', ({'key': 'value'}, {}),
["^Missing: 'key'$", "^oops$",
"^Missing: 'key'$",
"^Missing: 'key' : oops$"])
def testAssertMultiLineEqual(self):
self.assertMessages('assertMultiLineEqual', ("", "foo"),
[r"\+ foo$", "^oops$",
r"\+ foo$",
r"\+ foo : oops$"])
def testAssertLess(self):
self.assertMessages('assertLess', (2, 1),
["^2 not less than 1$", "^oops$",
"^2 not less than 1$", "^2 not less than 1 : oops$"])
def testAssertLessEqual(self):
self.assertMessages('assertLessEqual', (2, 1),
["^2 not less than or equal to 1$", "^oops$",
"^2 not less than or equal to 1$",
"^2 not less than or equal to 1 : oops$"])
def testAssertGreater(self):
self.assertMessages('assertGreater', (1, 2),
["^1 not greater than 2$", "^oops$",
"^1 not greater than 2$",
"^1 not greater than 2 : oops$"])
def testAssertGreaterEqual(self):
self.assertMessages('assertGreaterEqual', (1, 2),
["^1 not greater than or equal to 2$", "^oops$",
"^1 not greater than or equal to 2$",
"^1 not greater than or equal to 2 : oops$"])
def testAssertIsNone(self):
self.assertMessages('assertIsNone', ('not None',),
["^'not None' is not None$", "^oops$",
"^'not None' is not None$",
"^'not None' is not None : oops$"])
def testAssertIsNotNone(self):
self.assertMessages('assertIsNotNone', (None,),
["^unexpectedly None$", "^oops$",
"^unexpectedly None$",
"^unexpectedly None : oops$"])
def testAssertIs(self):
self.assertMessages('assertIs', (None, 'foo'),
["^None is not 'foo'$", "^oops$",
"^None is not 'foo'$",
"^None is not 'foo' : oops$"])
def testAssertIsNot(self):
self.assertMessages('assertIsNot', (None, None),
["^unexpectedly identical: None$", "^oops$",
"^unexpectedly identical: None$",
"^unexpectedly identical: None : oops$"])
def assertMessagesCM(self, methodName, args, func, errors):
"""
Check that the correct error messages are raised while executing:
with method(*args):
func()
*errors* should be a list of 4 regex that match the error when:
1) longMessage = False and no msg passed;
2) longMessage = False and msg passed;
3) longMessage = True and no msg passed;
4) longMessage = True and msg passed;
"""
p = product((self.testableFalse, self.testableTrue),
({}, {"msg": "oops"}))
for (cls, kwargs), err in zip(p, errors):
method = getattr(cls, methodName)
with self.assertRaisesRegex(cls.failureException, err):
with method(*args, **kwargs) as cm:
func()
def testAssertRaises(self):
self.assertMessagesCM('assertRaises', (TypeError,), lambda: None,
['^TypeError not raised$', '^oops$',
'^TypeError not raised$',
'^TypeError not raised : oops$'])
def testAssertRaisesRegex(self):
# test error not raised
self.assertMessagesCM('assertRaisesRegex', (TypeError, 'unused regex'),
lambda: None,
['^TypeError not raised$', '^oops$',
'^TypeError not raised$',
'^TypeError not raised : oops$'])
# test error raised but with wrong message
def raise_wrong_message():
raise TypeError('foo')
self.assertMessagesCM('assertRaisesRegex', (TypeError, 'regex'),
raise_wrong_message,
['^"regex" does not match "foo"$', '^oops$',
'^"regex" does not match "foo"$',
'^"regex" does not match "foo" : oops$'])
def testAssertWarns(self):
self.assertMessagesCM('assertWarns', (UserWarning,), lambda: None,
['^UserWarning not triggered$', '^oops$',
'^UserWarning not triggered$',
'^UserWarning not triggered : oops$'])
def testAssertWarnsRegex(self):
# test error not raised
self.assertMessagesCM('assertWarnsRegex', (UserWarning, 'unused regex'),
lambda: None,
['^UserWarning not triggered$', '^oops$',
'^UserWarning not triggered$',
'^UserWarning not triggered : oops$'])
# test warning raised but with wrong message
def raise_wrong_message():
warnings.warn('foo')
self.assertMessagesCM('assertWarnsRegex', (UserWarning, 'regex'),
raise_wrong_message,
['^"regex" does not match "foo"$', '^oops$',
'^"regex" does not match "foo"$',
'^"regex" does not match "foo" : oops$'])
| gpl-2.0 |
RobbieL811/kernel_asus_flo | tools/perf/scripts/python/failed-syscalls-by-pid.py | 11180 | 2058 | # failed system call counts, by pid
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide failed system call totals, broken down by pid.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s syscall-counts-by-pid.py [comm|pid]\n";
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_error_totals()
def raw_syscalls__sys_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, ret):
if (for_comm and common_comm != for_comm) or \
(for_pid and common_pid != for_pid ):
return
if ret < 0:
try:
syscalls[common_comm][common_pid][id][ret] += 1
except TypeError:
syscalls[common_comm][common_pid][id][ret] = 1
def print_error_totals():
if for_comm is not None:
print "\nsyscall errors for %s:\n\n" % (for_comm),
else:
print "\nsyscall errors:\n\n",
print "%-30s %10s\n" % ("comm [pid]", "count"),
print "%-30s %10s\n" % ("------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id in id_keys:
print " syscall: %-16s\n" % syscall_name(id),
ret_keys = syscalls[comm][pid][id].keys()
for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k), reverse = True):
print " err = %-20s %10d\n" % (strerror(ret), val),
| gpl-2.0 |
multmeio/mongoforms_dfields | mongoforms_dfields/models.py | 1 | 1449 | from mongoengine import *
__all__ = ['DynamicFields', 'has_dfields']
def has_dfields(cls):
    # Decorator: attach the DynamicFields map for this class (looked up by
    # class name) as a class attribute. Note the map is computed once at
    # decoration time, not refreshed per access; the property-based variant
    # below is defined but never returned.
    class new_cls(cls):
        @property
        def _dfields(self):
            return DynamicFields._dfields(self.__class__.__name__)
    #return new_cls
    cls._dfields = DynamicFields._dfields(cls.__name__)
    return cls
# Create your models here.
class DynamicFields(Document):
refer = StringField(max_length=120, required=True)
name = StringField(max_length=120, required=True)
typo = StringField(required=True)
max_length = IntField()
min_value = IntField()
max_value = IntField()
required = BooleanField(default=False)
choices = ListField(required=False)
@classmethod
def _dfields(cls, refer):
dynamic_fields = cls.objects.filter(refer = refer)
ddynamic_fields = {}
        for df in dynamic_fields:
            # typo stores a mongoengine field class name (e.g. "StringField");
            # eval() instantiates it, so these documents must be trusted input.
            ddynamic_fields[df.name] = eval(df.typo)()
ddynamic_fields[df.name].name = df.name
ddynamic_fields[df.name].max_length = df.max_length
ddynamic_fields[df.name].min_value = df.min_value
ddynamic_fields[df.name].max_value = df.max_value
ddynamic_fields[df.name].required = df.required
ddynamic_fields[df.name].choices = df.choices
return ddynamic_fields
def __unicode__(self):
return u"[%s] %s: %s" % (self.refer, self.typo, self.name)
| bsd-3-clause |
nasseralkmim/SaPy | sapy/plotter.py | 1 | 4743 | import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d.art3d import Line3D
from matplotlib.lines import Line2D
import numpy as np
def window(name):
return plt.figure(name)
def show():
plt.show()
return None
def undeformed(model):
"""Plot the undeformed structure according to the dimension
"""
if model.ndm == 2:
undeformed = window('Undeformed')
axes = undeformed.add_subplot(111, aspect='equal')
geo2d(model.XYZ, model.CON, axes, color='black')
label2d(model.XYZ, model.CON, axes)
undeformed.tight_layout()
if model.ndm == 3:
undeformed = window('Undeformed')
axes = undeformed.add_subplot(111, projection='3d', aspect='equal')
geo3d(model.XYZ, model.CON, axes, 'black')
label3d(model.XYZ, model.CON, axes)
undeformed.tight_layout()
def deformed(model, U):
"""Plot the deformed structure according to the dimension
"""
CON = model.CON
XYZ = np.copy(model.XYZ)
for n in range(model.nn):
for d in range(model.ndf[n]):
dof = model.DOF[n, d]
XYZ[n, d] += U[dof]
if model.ndm == 2:
deformed = window('Deformed')
axes = deformed.add_subplot(111, aspect='equal')
geo2d(XYZ, CON, axes, 'tomato')
geo2d(model.XYZ, model.CON, axes, 'black')
label2d(XYZ, CON, axes)
deformed.tight_layout()
if model.ndm == 3:
deformed = window('Deformed')
axes = deformed.add_subplot(111, projection='3d', aspect='equal')
geo3d(model.XYZ, model.CON, axes, 'black')
geo3d(XYZ, CON, axes, 'tomato')
label3d(XYZ, CON, axes)
deformed.tight_layout()
def geo3d(XYZ, CON, axes, color):
"""Plot the 3d model
"""
axes.set_xlabel('x')
axes.set_ylabel('y')
axes.set_zlabel('z')
# draw nodes
for node, xyz in enumerate(XYZ):
axes.scatter(xyz[0], xyz[1], xyz[2], c='k', alpha=1, marker='s')
# draw edges
for ele, con in enumerate(CON):
xs = [XYZ[con[0]][0], XYZ[con[1]][0]]
ys = [XYZ[con[0]][1], XYZ[con[1]][1]]
zs = [XYZ[con[0]][2], XYZ[con[1]][2]]
line = Line3D(xs, ys, zs, linewidth=1.0, color=color)
axes.add_line(line)
def label3d(XYZ, CON, axes):
"""Plot the nodes and element label
"""
for node, xyz in enumerate(XYZ):
axes.text(xyz[0], xyz[1], xyz[2], str(node), color='b', size=10)
for ele, con in enumerate(CON):
xm = (XYZ[con[0]][0] + XYZ[con[1]][0])/2
ym = (XYZ[con[0]][1] + XYZ[con[1]][1])/2
zm = (XYZ[con[0]][2] + XYZ[con[1]][2])/2
axes.text(xm, ym, zm, str(ele), color='g', size=10)
def geo2d(XYZ, CON, axes, color):
"""Plot the 2d model
"""
axes.set_xlabel('x')
axes.set_ylabel('y')
# draw nodes
for xyz in XYZ:
axes.scatter(xyz[0], xyz[1], c='k', alpha=1, marker='s')
# draw edges
for con in CON:
xs = [XYZ[con[0]][0], XYZ[con[1]][0]]
ys = [XYZ[con[0]][1], XYZ[con[1]][1]]
line = Line2D(xs, ys, linewidth=1.0, color=color)
axes.add_line(line)
def label2d(XYZ, CON, axes):
"""Plot the nodes and element label
"""
for node, xyz in enumerate(XYZ):
axes.text(xyz[0], xyz[1], str(node), color='b', size=10)
for ele, con in enumerate(CON):
xm = (XYZ[con[0]][0] + XYZ[con[1]][0])/2
ym = (XYZ[con[0]][1] + XYZ[con[1]][1])/2
axes.text(xm, ym, str(ele), color='g', size=10)
def axialforce(model, Q):
"""Plot axial force
"""
if model.ndm == 2:
axial = window('Axial')
axes = axial.add_subplot(111, aspect='equal')
geo2d(model.XYZ, model.CON, axes, color='black')
axial2d(model.XYZ, model.CON, Q, axes)
axial.tight_layout()
if model.ndm == 3:
axial = window('Axial')
axes = axial.add_subplot(111, projection='3d', aspect='equal')
geo3d(model.XYZ, model.CON, axes, 'black')
axial3d(model.XYZ, model.CON, Q, axes)
axial.tight_layout()
def axial2d(XYZ, CON, Q, axes):
"""Plot text with axial force value
"""
for ele, con in enumerate(CON):
xm = (XYZ[con[0]][0] + XYZ[con[1]][0])/2
ym = (XYZ[con[0]][1] + XYZ[con[1]][1])/2
axes.text(xm, ym, str(np.round_(Q[ele], 1)), color='g', size=10)
def axial3d(XYZ, CON, Q, axes):
"""Plot text with axial force value for 3d plot
"""
for ele, con in enumerate(CON):
xm = (XYZ[con[0]][0] + XYZ[con[1]][0])/2
ym = (XYZ[con[0]][1] + XYZ[con[1]][1])/2
zm = (XYZ[con[0]][2] + XYZ[con[1]][2])/2
axes.text(xm, ym, zm, str(np.round_(Q[ele], 1)), color='g', size=10)
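# Illustrative sketch (not part of the original module): the functions above
# expect a model object exposing ndm (2 or 3), nn, XYZ (node coordinates),
# CON (element connectivity), plus ndf/DOF for the deformed shape.
#
#   undeformed(model)       # plot the reference geometry
#   deformed(model, U)      # overlay the displaced shape
#   axialforce(model, Q)    # annotate member axial forces
#   show()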
| gpl-3.0 |
KhronosGroup/COLLADA-CTS | StandardDataSets/1_5/collada/library_visual_scenes/visual_scene/node/lookat/node_lookat/node_lookat.py | 2 | 3873 |
# Copyright (c) 2012 The Khronos Group Inc.
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and /or associated documentation files (the "Materials "), to deal in the Materials without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Materials, and to permit persons to whom the Materials are furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Materials.
# THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
# See Core.Logic.FJudgementContext for the information
# of the 'context' parameter.
# This sample judging object does the following:
#
# JudgeBaseline: just verifies that the standard steps did not crash.
# JudgeSuperior: also verifies that the validation steps are not in error.
# JudgeExemplary: same as intermediate badge.
# We import an assistant script that includes the common verifications
# methods. The assistant buffers its checks, so that running them again
# does not incurs an unnecessary performance hint.
from StandardDataSets.scripts import JudgeAssistant
# Please feed your node list here:
tagLst = []
attrName = ''
attrVal = ''
dataToCheck = ''
class SimpleJudgingObject:
def __init__(self, _tagLst, _attrName, _attrVal, _data):
self.tagList = _tagLst
self.attrName = _attrName
self.attrVal = _attrVal
self.dataToCheck = _data
self.status_baseline = False
self.status_superior = False
self.status_exemplary = False
self.__assistant = JudgeAssistant.JudgeAssistant()
def JudgeBaseline(self, context):
# No step should not crash
self.__assistant.CheckCrashes(context)
# Import/export/validate must exist and pass, while Render must only exist.
self.__assistant.CheckSteps(context, ["Import", "Export", "Validate"], ["Render"])
if (self.__assistant.GetResults() == False):
self.status_baseline = False
return False
# Compare the rendered images between import and export
if ( self.__assistant.CompareRenderedImages(context) ):
self.__assistant.CompareImagesAgainst(context, "_reference_node_pyramid_default", None, None, 5, True, True)
self.status_baseline = self.__assistant.DeferJudgement(context)
return self.status_baseline
# To pass intermediate you need to pass basic, this object could also include additional
# tests that were specific to the intermediate badge.
def JudgeSuperior(self, context):
self.status_superior = self.status_baseline
return self.status_superior
# To pass advanced you need to pass intermediate, this object could also include additional
# tests that were specific to the advanced badge
def JudgeExemplary(self, context):
self.status_exemplary = self.status_superior
return self.status_exemplary
# This is where all the work occurs: "judgingObject" is an absolutely necessary token.
# The dynamic loader looks very specifically for a class instance named "judgingObject".
#
judgingObject = SimpleJudgingObject(tagLst, attrName, attrVal, dataToCheck);
| mit |
ardsu/bitcoin | qa/rpc-tests/maxblocksinflight.py | 128 | 3998 | #!/usr/bin/env python2
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import logging
'''
In this test we connect to one node over p2p, send it numerous inv's, and
compare the resulting number of getdata requests to a max allowed value. We
test for exceeding 128 blocks in flight, which was the limit an 0.9 client will
reach. [0.10 clients shouldn't request more than 16 from a single peer.]
'''
MAX_REQUESTS = 128
class TestManager(NodeConnCB):
# set up NodeConnCB callbacks, overriding base class
def on_getdata(self, conn, message):
self.log.debug("got getdata %s" % repr(message))
# Log the requests
for inv in message.inv:
if inv.hash not in self.blockReqCounts:
self.blockReqCounts[inv.hash] = 0
self.blockReqCounts[inv.hash] += 1
def on_close(self, conn):
if not self.disconnectOkay:
raise EarlyDisconnectError(0)
def __init__(self):
NodeConnCB.__init__(self)
self.log = logging.getLogger("BlockRelayTest")
self.create_callback_map()
def add_new_connection(self, connection):
self.connection = connection
self.blockReqCounts = {}
self.disconnectOkay = False
def run(self):
try:
fail = False
self.connection.rpc.generate(1) # Leave IBD
numBlocksToGenerate = [ 8, 16, 128, 1024 ]
for count in range(len(numBlocksToGenerate)):
current_invs = []
for i in range(numBlocksToGenerate[count]):
current_invs.append(CInv(2, random.randrange(0, 1<<256)))
if len(current_invs) >= 50000:
self.connection.send_message(msg_inv(current_invs))
current_invs = []
if len(current_invs) > 0:
self.connection.send_message(msg_inv(current_invs))
# Wait and see how many blocks were requested
time.sleep(2)
total_requests = 0
with mininode_lock:
for key in self.blockReqCounts:
total_requests += self.blockReqCounts[key]
if self.blockReqCounts[key] > 1:
raise AssertionError("Error, test failed: block %064x requested more than once" % key)
if total_requests > MAX_REQUESTS:
raise AssertionError("Error, too many blocks (%d) requested" % total_requests)
print "Round %d: success (total requests: %d)" % (count, total_requests)
except AssertionError as e:
print "TEST FAILED: ", e.args
self.disconnectOkay = True
self.connection.disconnect_node()
class MaxBlocksInFlightTest(BitcoinTestFramework):
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("BITCOIND", "bitcoind"),
help="Binary to test max block requests behavior")
def setup_chain(self):
print "Initializing test directory "+self.options.tmpdir
initialize_chain_clean(self.options.tmpdir, 1)
def setup_network(self):
self.nodes = start_nodes(1, self.options.tmpdir,
extra_args=[['-debug', '-whitelist=127.0.0.1']],
binary=[self.options.testbinary])
def run_test(self):
test = TestManager()
test.add_new_connection(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test))
NetworkThread().start() # Start up network handling in another thread
test.run()
if __name__ == '__main__':
MaxBlocksInFlightTest().main()
| mit |
Rajeshkumar90/ansible-modules-extras | cloud/rackspace/rax_mon_notification_plan.py | 48 | 6222 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: rax_mon_notification_plan
short_description: Create or delete a Rackspace Cloud Monitoring notification
plan.
description:
- Create or delete a Rackspace Cloud Monitoring notification plan by
associating existing rax_mon_notifications with severity levels. Rackspace
monitoring module flow | rax_mon_entity -> rax_mon_check ->
rax_mon_notification -> *rax_mon_notification_plan* -> rax_mon_alarm
version_added: "2.0"
options:
state:
description:
- Ensure that the notification plan with this C(label) exists or does not
exist.
choices: ['present', 'absent']
label:
description:
- Defines a friendly name for this notification plan. String between 1 and
255 characters long.
required: true
critical_state:
description:
- Notification list to use when the alarm state is CRITICAL. Must be an
array of valid rax_mon_notification ids.
warning_state:
description:
- Notification list to use when the alarm state is WARNING. Must be an array
of valid rax_mon_notification ids.
ok_state:
description:
- Notification list to use when the alarm state is OK. Must be an array of
valid rax_mon_notification ids.
author: Ash Wilson
extends_documentation_fragment: rackspace.openstack
'''
EXAMPLES = '''
- name: Example notification plan
gather_facts: False
hosts: local
connection: local
tasks:
- name: Establish who gets called when.
rax_mon_notification_plan:
credentials: ~/.rax_pub
state: present
label: defcon1
critical_state:
- "{{ everyone['notification']['id'] }}"
warning_state:
- "{{ opsfloor['notification']['id'] }}"
register: defcon1
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
def notification_plan(module, state, label, critical_state, warning_state, ok_state):
if len(label) < 1 or len(label) > 255:
module.fail_json(msg='label must be between 1 and 255 characters long')
changed = False
notification_plan = None
cm = pyrax.cloud_monitoring
if not cm:
module.fail_json(msg='Failed to instantiate client. This typically '
'indicates an invalid region or an incorrectly '
'capitalized region name.')
existing = []
for n in cm.list_notification_plans():
if n.label == label:
existing.append(n)
if existing:
notification_plan = existing[0]
if state == 'present':
should_create = False
should_delete = False
if len(existing) > 1:
module.fail_json(msg='%s notification plans are labelled %s.' %
(len(existing), label))
if notification_plan:
should_delete = (critical_state and critical_state != notification_plan.critical_state) or \
(warning_state and warning_state != notification_plan.warning_state) or \
(ok_state and ok_state != notification_plan.ok_state)
if should_delete:
notification_plan.delete()
should_create = True
else:
should_create = True
if should_create:
notification_plan = cm.create_notification_plan(label=label,
critical_state=critical_state,
warning_state=warning_state,
ok_state=ok_state)
changed = True
else:
for np in existing:
np.delete()
changed = True
if notification_plan:
notification_plan_dict = {
"id": notification_plan.id,
"critical_state": notification_plan.critical_state,
"warning_state": notification_plan.warning_state,
"ok_state": notification_plan.ok_state,
"metadata": notification_plan.metadata
}
module.exit_json(changed=changed, notification_plan=notification_plan_dict)
else:
module.exit_json(changed=changed)
def main():
argument_spec = rax_argument_spec()
argument_spec.update(
dict(
state=dict(default='present', choices=['present', 'absent']),
label=dict(required=True),
critical_state=dict(type='list'),
warning_state=dict(type='list'),
ok_state=dict(type='list')
)
)
module = AnsibleModule(
argument_spec=argument_spec,
required_together=rax_required_together()
)
if not HAS_PYRAX:
module.fail_json(msg='pyrax is required for this module')
state = module.params.get('state')
label = module.params.get('label')
critical_state = module.params.get('critical_state')
warning_state = module.params.get('warning_state')
ok_state = module.params.get('ok_state')
setup_rax_module(module, pyrax)
notification_plan(module, state, label, critical_state, warning_state, ok_state)
# Import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *
# Invoke the module.
if __name__ == '__main__':
main()
| gpl-3.0 |
hennersz/pySpace | basemap/doc/users/figures/omerc.py | 6 | 1065 | from mpl_toolkits.basemap import Basemap
import numpy as np
import matplotlib.pyplot as plt
# setup oblique mercator basemap.
# width is width of map projection region in km (xmax-xmin)
# height is height of map projection region in km (ymax-ymin)
# lon_0, lat_0 are the central longitude and latitude of the projection.
# lat_1,lon_1 and lat_2,lon_2 are two pairs of points that define
# the projection centerline.
# Map projection coordinates are automatically rotated to true north.
# To avoid this, set no_rot=True.
# area_thresh=1000 means don't plot coastline features less
# than 1000 km^2 in area.
m = Basemap(height=16700000,width=12000000,
resolution='l',area_thresh=1000.,projection='omerc',\
lon_0=-100,lat_0=15,lon_2=-120,lat_2=65,lon_1=-50,lat_1=-55)
m.drawcoastlines()
m.fillcontinents(color='coral',lake_color='aqua')
# draw parallels and meridians.
m.drawparallels(np.arange(-80.,81.,20.))
m.drawmeridians(np.arange(-180.,181.,20.))
m.drawmapboundary(fill_color='aqua')
plt.title("Oblique Mercator Projection")
plt.show()
| gpl-3.0 |
Johnetordoff/osf.io | tests/test_mailchimp.py | 15 | 3860 | # -*- coding: utf-8 -*-
import mock
import pytest
from website import mailchimp_utils
from tests.base import OsfTestCase
from nose.tools import * # noqa; PEP8 asserts
from osf_tests.factories import UserFactory
import mailchimp
from framework.celery_tasks import handlers
@pytest.mark.enable_enqueue_task
class TestMailChimpHelpers(OsfTestCase):
def setUp(self, *args, **kwargs):
super(TestMailChimpHelpers, self).setUp(*args, **kwargs)
with self.context:
handlers.celery_before_request()
@mock.patch('website.mailchimp_utils.get_mailchimp_api')
def test_get_list_id_from_name(self, mock_get_mailchimp_api):
list_name = 'foo'
mock_client = mock.MagicMock()
mock_get_mailchimp_api.return_value = mock_client
mock_client.lists.list.return_value = {'data': [{'id': 1, 'list_name': list_name}]}
list_id = mailchimp_utils.get_list_id_from_name(list_name)
mock_client.lists.list.assert_called_with(filters={'list_name': list_name})
assert_equal(list_id, 1)
@mock.patch('website.mailchimp_utils.get_mailchimp_api')
def test_get_list_name_from_id(self, mock_get_mailchimp_api):
list_id = '12345'
mock_client = mock.MagicMock()
mock_get_mailchimp_api.return_value = mock_client
mock_client.lists.list.return_value = {'data': [{'id': list_id, 'name': 'foo'}]}
list_name = mailchimp_utils.get_list_name_from_id(list_id)
mock_client.lists.list.assert_called_with(filters={'list_id': list_id})
assert_equal(list_name, 'foo')
@mock.patch('website.mailchimp_utils.get_mailchimp_api')
def test_subscribe_called_with_correct_arguments(self, mock_get_mailchimp_api):
list_name = 'foo'
user = UserFactory()
mock_client = mock.MagicMock()
mock_get_mailchimp_api.return_value = mock_client
mock_client.lists.list.return_value = {'data': [{'id': 1, 'list_name': list_name}]}
list_id = mailchimp_utils.get_list_id_from_name(list_name)
mailchimp_utils.subscribe_mailchimp(list_name, user._id)
handlers.celery_teardown_request()
mock_client.lists.subscribe.assert_called_with(
id=list_id,
email={'email': user.username},
merge_vars={
'fname': user.given_name,
'lname': user.family_name,
},
double_optin=False,
update_existing=True,
)
@mock.patch('website.mailchimp_utils.get_mailchimp_api')
def test_subscribe_fake_email_does_not_throw_validation_error(self, mock_get_mailchimp_api):
list_name = 'foo'
user = UserFactory(username='[email protected]')
mock_client = mock.MagicMock()
mock_get_mailchimp_api.return_value = mock_client
mock_client.lists.list.return_value = {'data': [{'id': 1, 'list_name': list_name}]}
mock_client.lists.subscribe.side_effect = mailchimp.ValidationError
mailchimp_utils.subscribe_mailchimp(list_name, user._id)
handlers.celery_teardown_request()
user.reload()
assert_false(user.mailchimp_mailing_lists[list_name])
@mock.patch('website.mailchimp_utils.get_mailchimp_api')
def test_unsubscribe_called_with_correct_arguments(self, mock_get_mailchimp_api):
list_name = 'foo'
user = UserFactory()
mock_client = mock.MagicMock()
mock_get_mailchimp_api.return_value = mock_client
mock_client.lists.list.return_value = {'data': [{'id': 2, 'list_name': list_name}]}
list_id = mailchimp_utils.get_list_id_from_name(list_name)
mailchimp_utils.unsubscribe_mailchimp_async(list_name, user._id)
handlers.celery_teardown_request()
mock_client.lists.unsubscribe.assert_called_with(id=list_id, email={'email': user.username}, send_goodbye=True)
| apache-2.0 |
nikste/tensorflow | tensorflow/contrib/graph_editor/match.py | 186 | 5700 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Simple graph matching functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six import string_types
from tensorflow.contrib.graph_editor import select
from tensorflow.python.framework import ops as tf_ops
__all__ = [
"op_type",
"OpMatcher",
]
def _make_graph_match(graph_match):
"""Convert to a OpMatcher instance."""
if graph_match is None:
return None
if not isinstance(graph_match, OpMatcher):
graph_match = OpMatcher(graph_match)
return graph_match
def op_type(op_types, op=None):
"""Check if an op is of the given type.
Args:
op_types: tuple of strings containing the types to check against.
For instance: ("Add", "Const")
op: the operation to check (or None).
Returns:
if op is not None, return True if the op is of the correct type.
if op is None, return a lambda function which does the type checking.
"""
if isinstance(op_types, string_types):
    op_types = (op_types,)
if op is None:
return lambda op: op.node_def.op in op_types
else:
return op.node_def.op in op_types
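# Illustrative sketch (not part of the original module):
#
#   is_const = op_type("Const")            # returns a predicate on ops
#   op_type(("Add", "Const"), op=some_op)  # immediate membership check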
class OpMatcher(object):
"""Graph match class."""
def __init__(self, positive_filter):
"""Graph match constructor."""
self.positive_filters = []
self.input_op_matches = None
self.control_input_op_matches = None
self.output_op_matches = None
positive_filter = self._finalize_positive_filter(positive_filter)
self.positive_filters.append(positive_filter)
def _finalize_positive_filter(self, elem):
"""Convert to a filter function."""
if select.can_be_regex(elem):
regex_ = select.make_regex(elem)
return lambda op, regex=regex_: regex.search(op.name) is not None
elif isinstance(elem, tf_ops.Operation):
return lambda op, match_op=elem: op is match_op
elif callable(elem):
return elem
elif elem is True:
return lambda op: True
else:
raise ValueError("Cannot finalize the positive filter: {}".format(elem))
def __call__(self, op):
"""Evaluate if the op matches or not."""
if not isinstance(op, tf_ops.Operation):
raise TypeError("Expect tf.Operation, got: {}".format(type(op)))
for positive_filter in self.positive_filters:
if not positive_filter(op):
return False
if self.input_op_matches is not None:
if len(op.inputs) != len(self.input_op_matches):
return False
for input_t, input_op_match in zip(op.inputs, self.input_op_matches):
if input_op_match is None:
continue
if not input_op_match(input_t.op):
return False
if self.control_input_op_matches is not None:
if len(op.control_inputs) != len(self.control_input_op_matches):
return False
for cinput_op, cinput_op_match in zip(op.control_inputs,
self.control_input_op_matches):
if cinput_op_match is None:
continue
if not cinput_op_match(cinput_op):
return False
if self.output_op_matches is not None:
if len(op.outputs) != len(self.output_op_matches):
return False
for output_t, output_op_matches in zip(op.outputs,
self.output_op_matches):
if output_op_matches is None:
continue
if len(output_t.consumers()) != len(output_op_matches):
return False
for consumer_op, consumer_op_match in zip(output_t.consumers(),
output_op_matches):
if consumer_op_match is None:
continue
if not consumer_op_match(consumer_op):
return False
return True
def input_ops(self, *args):
"""Add input matches."""
if self.input_op_matches is not None:
raise ValueError("input_op_matches is already set.")
self.input_op_matches = []
for input_match in args:
self.input_op_matches.append(_make_graph_match(input_match))
return self
def control_input_ops(self, *args):
"""Add input matches."""
if self.control_input_op_matches is not None:
raise ValueError("control_input_op_matches is already set.")
self.control_input_op_matches = []
for input_match in args:
self.control_input_op_matches.append(_make_graph_match(input_match))
return self
def output_ops(self, *args):
"""Add output matches."""
if self.output_op_matches is not None:
raise ValueError("output_op_matches is already set.")
self.output_op_matches = []
for consumer_op_matches in args:
if consumer_op_matches is None:
self.output_op_matches.append(None)
if not isinstance(consumer_op_matches, list):
consumer_op_matches = [consumer_op_matches]
consumer_op_matches = [_make_graph_match(consumer_op_match)
for consumer_op_match in consumer_op_matches]
self.output_op_matches.append(consumer_op_matches)
return self
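# Illustrative sketch (not part of the original module): constraints chain,
# e.g. ops whose name contains "foo" and whose two inputs are a Const
# followed by anything:
#
#   matcher = OpMatcher("foo").input_ops(op_type("Const"), True)
#   hits = [op for op in graph.get_operations() if matcher(op)]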
| apache-2.0 |
MeteoSwiss-APN/dawn | dawn/examples/python/unstructured_stencil.py | 1 | 3291 | #!/usr/bin/env python
##===-----------------------------------------------------------------------------*- Python -*-===##
# _
# | |
# __| | __ ___ ___ ___
# / _` |/ _` \ \ /\ / / '_ |
# | (_| | (_| |\ V V /| | | |
# \__,_|\__,_| \_/\_/ |_| |_| - Compiler Toolchain
##
##
# This file is distributed under the MIT License (MIT).
# See LICENSE.txt for details.
##
##===------------------------------------------------------------------------------------------===##
"""Copy stencil HIR generator
This program creates the HIR corresponding to an unstructured stencil using the SIR serialization Python API.
The code is meant as an example for high-level DSLs that could generate HIR from their own
internal IR.
"""
import argparse
import os
import dawn4py
from dawn4py.serialization import SIR
from dawn4py.serialization import utils as serial_utils
OUTPUT_NAME = "unstructured_stencil"
OUTPUT_FILE = f"{OUTPUT_NAME}.cpp"
OUTPUT_PATH = f"{OUTPUT_NAME}.cpp"
def main(args: argparse.Namespace):
interval = serial_utils.make_interval(SIR.Interval.Start, SIR.Interval.End, 0, 0)
body_ast = serial_utils.make_ast(
[
serial_utils.make_assignment_stmt(
serial_utils.make_field_access_expr("out"),
serial_utils.make_reduction_over_neighbor_expr(
"+",
serial_utils.make_literal_access_expr("1.0", SIR.BuiltinType.Float),
serial_utils.make_field_access_expr("in"),
chain=[SIR.LocationType.Value("Edge"), SIR.LocationType.Value("Cell")],
),
"=",
)
]
)
vertical_region_stmt = serial_utils.make_vertical_region_decl_stmt(
body_ast, interval, SIR.VerticalRegion.Forward
)
sir = serial_utils.make_sir(
OUTPUT_FILE,
SIR.GridType.Value("Unstructured"),
[
serial_utils.make_stencil(
OUTPUT_NAME,
serial_utils.make_ast([vertical_region_stmt]),
[
serial_utils.make_field(
"in",
serial_utils.make_field_dimensions_unstructured(
[SIR.LocationType.Value("Cell")], 1
),
),
serial_utils.make_field(
"out",
serial_utils.make_field_dimensions_unstructured(
[SIR.LocationType.Value("Edge")], 1
),
),
],
),
],
)
# print the SIR
if args.verbose:
serial_utils.pprint(sir)
# compile
code = dawn4py.compile(sir, backend=dawn4py.CodeGenBackend.CXXNaiveIco)
# write to file
print(f"Writing generated code to '{OUTPUT_PATH}'")
with open(OUTPUT_PATH, "w") as f:
f.write(code)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Generate a simple unstructured copy stencil using Dawn compiler"
)
parser.add_argument(
"-v",
"--verbose",
dest="verbose",
action="store_true",
default=False,
help="Print the generated SIR",
)
main(parser.parse_args())
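# Usage sketch (assuming dawn4py is installed and importable):
#
#   python unstructured_stencil.py --verbose
#
# This prints the generated SIR and writes the C++ code produced by the
# CXXNaiveIco backend to unstructured_stencil.cpp in the working directory.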
| mit |
dunkhong/grr | grr/server/grr_response_server/cronjobs_test.py | 2 | 31447 | #!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import functools
import threading
from absl import app
from future.builtins import range
import mock
from grr_response_core.lib import rdfvalue
from grr_response_core.lib.rdfvalues import paths as rdf_paths
from grr_response_core.lib.util import compatibility
from grr_response_server import cronjobs
from grr_response_server import data_store
from grr_response_server.flows.general import transfer
from grr_response_server.rdfvalues import cronjobs as rdf_cronjobs
from grr.test_lib import test_lib
class DummySystemCronJobRel(cronjobs.SystemCronJobBase):
"""Dummy system cron job."""
lifetime = rdfvalue.Duration.From(42, rdfvalue.HOURS)
frequency = rdfvalue.Duration.From(42, rdfvalue.DAYS)
def Run(self):
pass
class DummyStatefulSystemCronJobRel(cronjobs.SystemCronJobBase):
"""Dummy stateful system cron job."""
frequency = rdfvalue.Duration.From(1, rdfvalue.DAYS)
lifetime = rdfvalue.Duration.From(20, rdfvalue.HOURS)
VALUES = []
def Run(self):
state = self.ReadCronState()
value = state.get("value", 0)
DummyStatefulSystemCronJobRel.VALUES.append(value)
state["value"] = value + 1
self.WriteCronState(state)
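  # Note: the Read/WriteCronState round-trip above is what makes this job
  # "stateful": each invocation of Run() loads the dict persisted by the
  # previous run, increments the counter and stores it back, so VALUES
  # accumulates 0, 1, 2, ... across runs (exercised by
  # testStatefulSystemCronJobMaintainsState below).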
class DummyDisabledSystemCronJobRel(DummySystemCronJobRel):
"""Disabled system cron job."""
enabled = False
def WaitForEvent(event):
event.wait()
def WaitAndSignal(wait_event, signal_event):
signal_event.set()
wait_event.wait()
class RelationalCronTest(test_lib.GRRBaseTest):
"""Tests for cron functionality."""
def tearDown(self):
# Make sure all pending cronjobs have been processed before we wipe the db.
cronjobs.CronManager()._GetThreadPool().Stop()
super(RelationalCronTest, self).tearDown()
def testCronJobPreservesFlowNameAndArguments(self):
pathspec = rdf_paths.PathSpec(
path="/foo", pathtype=rdf_paths.PathSpec.PathType.TSK)
cron_manager = cronjobs.CronManager()
flow_name = transfer.GetFile.__name__
cron_args = rdf_cronjobs.CreateCronJobArgs(
frequency="1d", allow_overruns=False, flow_name=flow_name)
cron_args.flow_args.pathspec = pathspec
job_id = cron_manager.CreateJob(cron_args=cron_args)
# Check that CronJob definition is saved properly
jobs = cron_manager.ListJobs(token=self.token)
self.assertLen(jobs, 1)
self.assertEqual(jobs[0], job_id)
cron_job = cron_manager.ReadJob(job_id, token=self.token)
hunt_args = cron_job.args.hunt_cron_action
self.assertEqual(hunt_args.flow_name, flow_name)
self.assertEqual(hunt_args.flow_args.pathspec, pathspec)
self.assertEqual(cron_job.frequency,
rdfvalue.Duration.From(1, rdfvalue.DAYS))
self.assertEqual(cron_job.allow_overruns, False)
def testCronJobStartsRun(self):
cron_manager = cronjobs.CronManager()
create_flow_args = rdf_cronjobs.CreateCronJobArgs()
job_id = cron_manager.CreateJob(cron_args=create_flow_args)
cron_job = cron_manager.ReadJob(job_id, token=self.token)
self.assertFalse(cron_manager.JobIsRunning(cron_job, token=self.token))
# The job never ran, so JobDueToRun() should return true.
self.assertTrue(cron_manager.JobDueToRun(cron_job))
cron_manager.RunOnce(token=self.token)
cron_manager._GetThreadPool().Join()
runs = cron_manager.ReadJobRuns(job_id, token=self.token)
self.assertLen(runs, 1)
run = runs[0]
self.assertTrue(run.run_id)
self.assertTrue(run.started_at)
self.assertTrue(run.finished_at)
self.assertEqual(run.status, "FINISHED")
def testDisabledCronJobDoesNotCreateJobs(self):
cron_manager = cronjobs.CronManager()
create_flow_args = rdf_cronjobs.CreateCronJobArgs()
job_id1 = cron_manager.CreateJob(cron_args=create_flow_args)
job_id2 = cron_manager.CreateJob(cron_args=create_flow_args)
cron_manager.DisableJob(job_id1, token=self.token)
event = threading.Event()
waiting_func = functools.partial(WaitForEvent, event)
try:
with mock.patch.object(cronjobs.RunHunt, "Run", wraps=waiting_func):
cron_manager.RunOnce(token=self.token)
cron_job1 = cron_manager.ReadJob(job_id1, token=self.token)
cron_job2 = cron_manager.ReadJob(job_id2, token=self.token)
# Disabled flow shouldn't be running, while not-disabled flow should run
# as usual.
self.assertFalse(cron_manager.JobIsRunning(cron_job1, token=self.token))
self.assertTrue(cron_manager.JobIsRunning(cron_job2, token=self.token))
finally:
event.set()
@mock.patch.object(cronjobs, "TASK_STARTUP_WAIT", 1)
def testCronMaxThreadsLimitIsRespectedAndCorrectlyHandled(self):
cron_manager = cronjobs.CronManager()
event = threading.Event()
waiting_func = functools.partial(WaitForEvent, event)
try:
create_flow_args = rdf_cronjobs.CreateCronJobArgs(
frequency="1h", lifetime="1h")
with mock.patch.object(cronjobs.RunHunt, "Run", wraps=waiting_func):
job_ids = []
for _ in range(cron_manager.max_threads * 2):
job_ids.append(cron_manager.CreateJob(cron_args=create_flow_args))
cron_manager.RunOnce(token=self.token)
count_scheduled = 0
for job_id in job_ids:
count_scheduled += len(cron_manager.ReadJobRuns(job_id))
self.assertEqual(count_scheduled, cron_manager.max_threads)
finally:
event.set()
cron_manager._GetThreadPool().Join()
count_scheduled = 0
for job_id in job_ids:
count_scheduled += len(cron_manager.ReadJobRuns(job_id))
# Check that tasks that were not scheduled due to max_threads limit
# run later.
self.assertEqual(count_scheduled, cron_manager.max_threads)
# Now all the cron jobs that weren't scheduled in previous RunOnce call
# due to max_threads limit should get scheduled.
cron_manager.RunOnce(token=self.token)
count_scheduled = 0
for job_id in job_ids:
count_scheduled += len(cron_manager.ReadJobRuns(job_id))
self.assertEqual(count_scheduled, cron_manager.max_threads * 2)
def testNonExistingSystemCronJobDoesNotPreventOtherCronJobsFromRunning(self):
# Have a fake non-existing cron job. We assume that cron jobs are going
# to be processed in alphabetical order, according to their cron job ids.
args = rdf_cronjobs.CronJobAction(
action_type=rdf_cronjobs.CronJobAction.ActionType.SYSTEM_CRON_ACTION,
system_cron_action=rdf_cronjobs.SystemCronAction(
job_class_name="__AbstractFakeCronJob__"))
job = rdf_cronjobs.CronJob(
cron_job_id="cron_1",
args=args,
enabled=True,
frequency=rdfvalue.Duration.From(2, rdfvalue.HOURS),
lifetime=rdfvalue.Duration.From(1, rdfvalue.HOURS),
allow_overruns=False)
data_store.REL_DB.WriteCronJob(job)
# Have a proper cron job.
cron_manager = cronjobs.CronManager()
args = rdf_cronjobs.CronJobAction(
action_type=rdf_cronjobs.CronJobAction.ActionType.SYSTEM_CRON_ACTION,
system_cron_action=rdf_cronjobs.SystemCronAction(
job_class_name="DummyStatefulSystemCronJobRel"))
job = rdf_cronjobs.CronJob(
cron_job_id="cron_2",
args=args,
enabled=True,
frequency=rdfvalue.Duration.From(2, rdfvalue.HOURS),
lifetime=rdfvalue.Duration.From(1, rdfvalue.HOURS),
allow_overruns=False)
data_store.REL_DB.WriteCronJob(job)
with self.assertRaises(cronjobs.OneOrMoreCronJobsFailedError):
cron_manager.RunOnce()
cron_manager._GetThreadPool().Join()
self.assertEmpty(cron_manager.ReadJobRuns("cron_1"))
self.assertLen(cron_manager.ReadJobRuns("cron_2"), 1)
def testCronJobRunDoesNothingIfCurrentFlowIsRunning(self):
event = threading.Event()
waiting_func = functools.partial(WaitForEvent, event)
cron_manager = cronjobs.CronManager()
with mock.patch.object(cronjobs.RunHunt, "Run", wraps=waiting_func):
try:
fake_time = rdfvalue.RDFDatetime.Now()
with test_lib.FakeTime(fake_time):
create_flow_args = rdf_cronjobs.CreateCronJobArgs(
allow_overruns=False, frequency="1h")
job_id = cron_manager.CreateJob(cron_args=create_flow_args)
cron_manager.RunOnce(token=self.token)
cron_job_runs = cron_manager.ReadJobRuns(job_id, token=self.token)
self.assertLen(cron_job_runs, 1)
job = cron_manager.ReadJob(job_id)
self.assertTrue(cron_manager.JobIsRunning(job))
fake_time += rdfvalue.Duration.From(2, rdfvalue.HOURS)
with test_lib.FakeTime(fake_time):
cron_manager.RunOnce(token=self.token)
cron_job_runs = cron_manager.ReadJobRuns(job_id, token=self.token)
self.assertLen(cron_job_runs, 1)
finally:
event.set()
cron_manager._GetThreadPool().Join()
def testForceRun(self):
event = threading.Event()
waiting_func = functools.partial(WaitForEvent, event)
cron_manager = cronjobs.CronManager()
with mock.patch.object(cronjobs.RunHunt, "Run", wraps=waiting_func):
try:
fake_time = rdfvalue.RDFDatetime.Now()
with test_lib.FakeTime(fake_time):
create_flow_args = rdf_cronjobs.CreateCronJobArgs(
allow_overruns=False, frequency="1h")
job_id = cron_manager.CreateJob(cron_args=create_flow_args)
cron_manager.RunOnce(token=self.token)
cron_job_runs = cron_manager.ReadJobRuns(job_id, token=self.token)
self.assertLen(cron_job_runs, 1)
job = cron_manager.ReadJob(job_id)
self.assertTrue(cron_manager.JobIsRunning(job))
# At this point, there is a run currently executing and also the job
# is not due to run for another hour. We can still force execute the
# job.
cron_manager.RunOnce(token=self.token)
cron_job_runs = cron_manager.ReadJobRuns(job_id, token=self.token)
self.assertLen(cron_job_runs, 1)
cron_manager.RequestForcedRun(job_id)
cron_manager.RunOnce(token=self.token)
cron_job_runs = cron_manager.ReadJobRuns(job_id, token=self.token)
self.assertLen(cron_job_runs, 2)
# The only way to prevent a forced run is to disable the job.
cron_manager.DisableJob(job_id)
cron_manager.RequestForcedRun(job_id)
cron_manager.RunOnce(token=self.token)
cron_job_runs = cron_manager.ReadJobRuns(job_id, token=self.token)
self.assertLen(cron_job_runs, 2)
# And enable again.
cron_manager.EnableJob(job_id)
cron_manager.RequestForcedRun(job_id)
cron_manager.RunOnce(token=self.token)
cron_job_runs = cron_manager.ReadJobRuns(job_id, token=self.token)
self.assertLen(cron_job_runs, 3)
finally:
event.set()
cron_manager._GetThreadPool().Join()
def testCronJobRunDoesNothingIfDueTimeHasNotComeYet(self):
fake_time = rdfvalue.RDFDatetime.Now()
with test_lib.FakeTime(fake_time):
cron_manager = cronjobs.CronManager()
create_flow_args = rdf_cronjobs.CreateCronJobArgs(
allow_overruns=False, frequency="1h")
job_id = cron_manager.CreateJob(cron_args=create_flow_args)
cron_manager.RunOnce(token=self.token)
cron_job_runs = cron_manager.ReadJobRuns(job_id, token=self.token)
self.assertLen(cron_job_runs, 1)
# Let 59 minutes pass. Frequency is 1 hour, so new flow is not
# supposed to start.
fake_time += rdfvalue.Duration.From(59, rdfvalue.MINUTES)
with test_lib.FakeTime(fake_time):
cron_manager.RunOnce(token=self.token)
cron_job_runs = cron_manager.ReadJobRuns(job_id, token=self.token)
self.assertLen(cron_job_runs, 1)
def testCronJobRunPreventsOverrunsWhenAllowOverrunsIsFalse(self):
event = threading.Event()
waiting_func = functools.partial(WaitForEvent, event)
cron_manager = cronjobs.CronManager()
try:
with mock.patch.object(cronjobs.RunHunt, "Run", wraps=waiting_func):
fake_time = rdfvalue.RDFDatetime.Now()
with test_lib.FakeTime(fake_time):
create_flow_args = rdf_cronjobs.CreateCronJobArgs(
allow_overruns=False, frequency="1h")
job_id = cron_manager.CreateJob(cron_args=create_flow_args)
cron_manager.RunOnce(token=self.token)
cron_job_runs = cron_manager.ReadJobRuns(job_id, token=self.token)
self.assertLen(cron_job_runs, 1)
# Let two hours pass. Frequency is 1h (i.e. cron job iterations are
# supposed to be started every hour), so the new flow should be started
# by RunOnce(). However, as allow_overruns is False, and previous
# iteration flow hasn't finished yet, no flow will be started.
fake_time += rdfvalue.Duration.From(2, rdfvalue.HOURS)
with test_lib.FakeTime(fake_time):
cron_manager.RunOnce(token=self.token)
cron_job_runs = cron_manager.ReadJobRuns(job_id, token=self.token)
self.assertLen(cron_job_runs, 1)
finally:
event.set()
cron_manager._GetThreadPool().Join()
  def testCronJobRunAllowsOverrunsWhenAllowOverrunsIsTrue(self):
event = threading.Event()
waiting_func = functools.partial(WaitForEvent, event)
cron_manager = cronjobs.CronManager()
try:
with mock.patch.object(cronjobs.RunHunt, "Run", wraps=waiting_func):
fake_time = rdfvalue.RDFDatetime.Now()
with test_lib.FakeTime(fake_time):
create_flow_args = rdf_cronjobs.CreateCronJobArgs(
allow_overruns=True, frequency="1h")
job_id = cron_manager.CreateJob(cron_args=create_flow_args)
cron_manager.RunOnce(token=self.token)
cron_job_runs = cron_manager.ReadJobRuns(job_id, token=self.token)
self.assertLen(cron_job_runs, 1)
        # Let two hours pass. Frequency is 1h (i.e. cron job iterations are
        # supposed to be started every hour), so a new flow should be started
        # by RunOnce(). The previous iteration flow hasn't finished yet, but
        # allow_overruns is True, so a second run is started anyway.
fake_time += rdfvalue.Duration.From(2, rdfvalue.HOURS)
with test_lib.FakeTime(fake_time):
cron_manager.RunOnce(token=self.token)
cron_job_runs = cron_manager.ReadJobRuns(job_id, token=self.token)
self.assertLen(cron_job_runs, 2)
finally:
event.set()
cron_manager._GetThreadPool().Join()
def testCronManagerListJobsDoesNotListDeletedJobs(self):
cron_manager = cronjobs.CronManager()
create_flow_args = rdf_cronjobs.CreateCronJobArgs()
cron_job_id = cron_manager.CreateJob(cron_args=create_flow_args)
self.assertLen(cron_manager.ListJobs(), 1)
cron_manager.DeleteJob(cron_job_id)
self.assertEmpty(cron_manager.ListJobs())
def testRunningJobs(self):
event = threading.Event()
waiting_func = functools.partial(WaitForEvent, event)
with mock.patch.object(cronjobs.RunHunt, "Run", wraps=waiting_func):
cron_manager = cronjobs.CronManager()
create_flow_args = rdf_cronjobs.CreateCronJobArgs(
frequency="1w", lifetime="1d")
job_id = cron_manager.CreateJob(cron_args=create_flow_args)
prev_timeout_value = cronjobs.CRON_JOB_TIMEOUT.GetValue(fields=[job_id])
prev_latency_value = cronjobs.CRON_JOB_LATENCY.GetValue([job_id])
cron_manager.RunOnce(token=self.token)
cron_job = cron_manager.ReadJob(job_id, token=self.token)
self.assertTrue(cron_manager.JobIsRunning(cron_job))
runs = cron_manager.ReadJobRuns(job_id)
self.assertLen(runs, 1)
run = runs[0]
self.assertEqual(cron_job.current_run_id, run.run_id)
self.assertEqual(run.status, "RUNNING")
event.set()
cron_manager._GetThreadPool().Join()
cron_job = cron_manager.ReadJob(job_id, token=self.token)
self.assertFalse(cron_manager.JobIsRunning(cron_job))
runs = cron_manager.ReadJobRuns(job_id)
self.assertLen(runs, 1)
run = runs[0]
self.assertFalse(cron_job.current_run_id)
self.assertEqual(run.status, "FINISHED")
# Check that timeout counter got updated.
current_timeout_value = cronjobs.CRON_JOB_TIMEOUT.GetValue([job_id])
self.assertEqual(current_timeout_value, prev_timeout_value)
# Check that latency stat got updated.
current_latency_value = cronjobs.CRON_JOB_LATENCY.GetValue([job_id])
self.assertEqual(current_latency_value.count - prev_latency_value.count,
1)
def testTimeoutOfCrashedCronJobIsHandledCorrectly(self):
wait_event = threading.Event()
signal_event = threading.Event()
waiting_func = functools.partial(WaitAndSignal, wait_event, signal_event)
fake_time = rdfvalue.RDFDatetime.Now()
with mock.patch.object(cronjobs.RunHunt, "Run", wraps=waiting_func):
with test_lib.FakeTime(fake_time):
cron_manager = cronjobs.CronManager()
create_flow_args = rdf_cronjobs.CreateCronJobArgs()
create_flow_args.frequency = "1h"
create_flow_args.lifetime = "1h"
job_id = cron_manager.CreateJob(cron_args=create_flow_args)
cron_manager.RunOnce(token=self.token)
# Make sure the cron job has actually been started.
signal_event.wait(10)
cron_job = cron_manager.ReadJob(job_id, token=self.token)
self.assertTrue(cron_manager.JobIsRunning(cron_job))
runs = cron_manager.ReadJobRuns(job_id)
self.assertLen(runs, 1)
run = runs[0]
self.assertEqual(cron_job.current_run_id, run.run_id)
self.assertEqual(run.status, "RUNNING")
try:
prev_timeout_value = cronjobs.CRON_JOB_TIMEOUT.GetValue([job_id])
prev_latency_value = cronjobs.CRON_JOB_LATENCY.GetValue([job_id])
fake_time += rdfvalue.Duration.From(2, rdfvalue.HOURS)
with test_lib.FakeTime(fake_time):
signal_event.clear()
# First RunOnce call will mark the stuck job as failed.
cron_manager.RunOnce(token=self.token)
cron_job = cron_manager.ReadJob(job_id, token=self.token)
self.assertEqual(cron_job.last_run_status, "LIFETIME_EXCEEDED")
runs = cron_manager.ReadJobRuns(job_id)
self.assertLen(runs, 1)
self.assertEqual(runs[0].status, "LIFETIME_EXCEEDED")
# Second RunOnce call will schedule a new invocation.
cron_manager.RunOnce(token=self.token)
signal_event.wait(10)
# Previous job run should be considered stuck by now. A new one
# has to be started.
cron_job = cron_manager.ReadJob(job_id, token=self.token)
runs = cron_manager.ReadJobRuns(job_id)
self.assertLen(runs, 2)
old_run, new_run = sorted(runs, key=lambda r: r.started_at)
self.assertIsNotNone(new_run.started_at)
self.assertEqual(new_run.status, "RUNNING")
self.assertEqual(cron_job.current_run_id, new_run.run_id)
self.assertIsNotNone(old_run.started_at)
self.assertIsNotNone(old_run.finished_at)
self.assertEqual(old_run.status, "LIFETIME_EXCEEDED")
self.assertEqual(cron_job.last_run_status, "LIFETIME_EXCEEDED")
# Check that timeout counter got updated.
current_timeout_value = cronjobs.CRON_JOB_TIMEOUT.GetValue([job_id])
self.assertEqual(current_timeout_value - prev_timeout_value, 1)
# Check that latency stat got updated.
current_latency_value = cronjobs.CRON_JOB_LATENCY.GetValue([job_id])
self.assertEqual(
current_latency_value.count - prev_latency_value.count, 1)
self.assertEqual(
current_latency_value.sum - prev_latency_value.sum,
rdfvalue.Duration.From(2, rdfvalue.HOURS).ToInt(rdfvalue.SECONDS))
finally:
# Make sure that the cron job thread actually finishes.
wait_event.set()
cron_manager._GetThreadPool().Join()
# Make sure cron job got updated correctly after stuck job as finished.
cron_job = cron_manager.ReadJob(job_id, token=self.token)
self.assertEqual(cron_job.last_run_status, "FINISHED")
def testTimeoutOfLongRunningJobIsHandledCorrectly(self):
wait_event = threading.Event()
signal_event = threading.Event()
waiting_func = functools.partial(WaitAndSignal, wait_event, signal_event)
fake_time = rdfvalue.RDFDatetime.Now()
with mock.patch.object(cronjobs.RunHunt, "Run", wraps=waiting_func):
with test_lib.FakeTime(fake_time):
cron_manager = cronjobs.CronManager()
create_flow_args = rdf_cronjobs.CreateCronJobArgs()
create_flow_args.lifetime = "1h"
job_id = cron_manager.CreateJob(cron_args=create_flow_args)
cron_manager.RunOnce(token=self.token)
# Make sure the cron job has actually been started.
signal_event.wait(10)
cron_job = cron_manager.ReadJob(job_id, token=self.token)
self.assertTrue(cron_manager.JobIsRunning(cron_job))
runs = cron_manager.ReadJobRuns(job_id)
self.assertLen(runs, 1)
run = runs[0]
self.assertEqual(cron_job.current_run_id, run.run_id)
self.assertEqual(run.status, "RUNNING")
prev_timeout_value = cronjobs.CRON_JOB_TIMEOUT.GetValue([job_id])
prev_latency_value = cronjobs.CRON_JOB_LATENCY.GetValue([job_id])
fake_time += rdfvalue.Duration.From(2, rdfvalue.HOURS)
with test_lib.FakeTime(fake_time):
wait_event.set()
cron_manager._GetThreadPool().Join()
cron_job = cron_manager.ReadJob(job_id, token=self.token)
runs = cron_manager.ReadJobRuns(job_id)
self.assertLen(runs, 1)
run = runs[0]
self.assertEqual(cron_job.last_run_status, "LIFETIME_EXCEEDED")
self.assertEqual(run.status, "LIFETIME_EXCEEDED")
# Check that timeout counter got updated.
current_timeout_value = cronjobs.CRON_JOB_TIMEOUT.GetValue([job_id])
self.assertEqual(current_timeout_value - prev_timeout_value, 1)
# Check that latency stat got updated.
current_latency_value = cronjobs.CRON_JOB_LATENCY.GetValue([job_id])
self.assertEqual(current_latency_value.count - prev_latency_value.count,
1)
self.assertEqual(
current_latency_value.sum - prev_latency_value.sum,
rdfvalue.Duration.From(2, rdfvalue.HOURS).ToInt(rdfvalue.SECONDS))
def testError(self):
with mock.patch.object(
cronjobs.RunHunt,
"Run",
side_effect=ValueError("Random cron job error.")):
cron_manager = cronjobs.CronManager()
create_flow_args = rdf_cronjobs.CreateCronJobArgs()
job_id = cron_manager.CreateJob(cron_args=create_flow_args)
prev_failure_value = cronjobs.CRON_JOB_FAILURE.GetValue([job_id])
prev_latency_value = cronjobs.CRON_JOB_LATENCY.GetValue([job_id])
cron_manager.RunOnce(token=self.token)
cron_manager._GetThreadPool().Join()
cron_job = cron_manager.ReadJob(job_id, token=self.token)
self.assertFalse(cron_manager.JobIsRunning(cron_job))
runs = cron_manager.ReadJobRuns(job_id)
self.assertLen(runs, 1)
run = runs[0]
self.assertEqual(cron_job.last_run_status, "ERROR")
self.assertEqual(run.status, "ERROR")
self.assertTrue(run.backtrace)
self.assertIn("cron job error", run.backtrace)
current_failure_value = cronjobs.CRON_JOB_FAILURE.GetValue([job_id])
current_latency_value = cronjobs.CRON_JOB_LATENCY.GetValue([job_id])
self.assertEqual(current_failure_value, prev_failure_value + 1)
self.assertEqual(current_latency_value.count,
prev_latency_value.count + 1)
def testSchedulingJobWithFixedNamePreservesTheName(self):
cron_manager = cronjobs.CronManager()
create_flow_args = rdf_cronjobs.CreateCronJobArgs()
job_id = cron_manager.CreateJob(cron_args=create_flow_args, job_id="TheJob")
self.assertEqual("TheJob", job_id)
def testSystemCronJobsGetScheduledAutomatically(self):
cronjobs.ScheduleSystemCronJobs(names=[DummySystemCronJobRel.__name__])
jobs = cronjobs.CronManager().ListJobs()
self.assertIn("DummySystemCronJobRel", jobs)
# System cron job should be enabled by default.
job = cronjobs.CronManager().ReadJob("DummySystemCronJobRel")
self.assertTrue(job.enabled)
def testSystemCronJobsWithDisabledAttributeDoNotGetScheduled(self):
cronjobs.ScheduleSystemCronJobs(
names=[DummyDisabledSystemCronJobRel.__name__])
jobs = cronjobs.CronManager().ListJobs()
self.assertIn("DummyDisabledSystemCronJobRel", jobs)
    # This system cron job should be disabled, because its "enabled" class
    # attribute is set to False.
job = cronjobs.CronManager().ReadJob("DummyDisabledSystemCronJobRel")
self.assertFalse(job.enabled)
def testSystemCronJobsMayBeDisabledViaConfig(self):
with test_lib.ConfigOverrider(
{"Cron.disabled_cron_jobs": ["DummySystemCronJobRel"]}):
cronjobs.ScheduleSystemCronJobs()
cron_manager = cronjobs.CronManager()
jobs = cron_manager.ListJobs()
self.assertIn("DummySystemCronJobRel", jobs)
# This cron job should be disabled, because it's listed in
# Cron.disabled_cron_jobs config variable.
job = cron_manager.ReadJob("DummySystemCronJobRel")
self.assertFalse(job.enabled)
    # Now remove the cron job from the list and check that it gets enabled
    # after the next ScheduleSystemCronJobs() call.
with test_lib.ConfigOverrider({"Cron.disabled_cron_jobs": []}):
cronjobs.ScheduleSystemCronJobs()
# System cron job should be enabled.
job = cron_manager.ReadJob("DummySystemCronJobRel")
self.assertTrue(job.enabled)
def testScheduleSystemCronJobsRaisesWhenFlowCanNotBeFound(self):
with test_lib.ConfigOverrider({"Cron.disabled_cron_jobs": ["NonExistent"]}):
self.assertRaises(ValueError, cronjobs.ScheduleSystemCronJobs)
def testSystemCronJobsGetScheduledWhenDisabledListInvalid(self):
with test_lib.ConfigOverrider({"Cron.disabled_cron_jobs": ["NonExistent"]}):
with self.assertRaises(ValueError):
cronjobs.ScheduleSystemCronJobs(names=[DummySystemCronJobRel.__name__])
jobs = cronjobs.CronManager().ListJobs()
self.assertIn("DummySystemCronJobRel", jobs)
def testStatefulSystemCronJobMaintainsState(self):
DummyStatefulSystemCronJobRel.VALUES = []
# We need to have a cron job started to have a place to maintain
# state.
cron_manager = cronjobs.CronManager()
args = rdf_cronjobs.CronJobAction(
action_type=rdf_cronjobs.CronJobAction.ActionType.SYSTEM_CRON_ACTION,
system_cron_action=rdf_cronjobs.SystemCronAction(
job_class_name="DummyStatefulSystemCronJobRel"))
job = rdf_cronjobs.CronJob(
cron_job_id="test_cron",
args=args,
enabled=True,
frequency=rdfvalue.Duration.From(2, rdfvalue.HOURS),
lifetime=rdfvalue.Duration.From(1, rdfvalue.HOURS),
allow_overruns=False)
data_store.REL_DB.WriteCronJob(job)
fake_time = rdfvalue.RDFDatetime.Now()
for i in range(3):
with test_lib.FakeTime(fake_time +
rdfvalue.Duration.From(3 * i, rdfvalue.HOURS)):
cron_manager.RunOnce()
cron_manager._GetThreadPool().Join()
runs = cron_manager.ReadJobRuns("test_cron")
self.assertLen(runs, i + 1)
for run in runs:
self.assertEqual(run.status, "FINISHED")
self.assertListEqual(DummyStatefulSystemCronJobRel.VALUES, [0, 1, 2])
def testHeartbeat_EnforceMaxRuntime(self):
cron_started_event = threading.Event()
heartbeat_event = threading.Event()
class HeartbeatingCronJob(cronjobs.SystemCronJobBase):
lifetime = rdfvalue.Duration.From(1, rdfvalue.HOURS)
frequency = rdfvalue.Duration.From(2, rdfvalue.HOURS)
allow_overruns = False
def Run(self):
cron_started_event.set()
heartbeat_event.wait()
fake_time = self.run_state.started_at + rdfvalue.Duration.From(
3, rdfvalue.HOURS)
with test_lib.FakeTime(fake_time):
self.HeartBeat()
self._TestHeartBeat(HeartbeatingCronJob, cron_started_event,
heartbeat_event)
def testHeartbeat_AllowOverruns(self):
cron_started_event = threading.Event()
heartbeat_event = threading.Event()
class HeartbeatingOverruningCronJob(cronjobs.SystemCronJobBase):
lifetime = rdfvalue.Duration.From(1, rdfvalue.HOURS)
frequency = rdfvalue.Duration.From(2, rdfvalue.HOURS)
allow_overruns = True
def Run(self):
cron_started_event.set()
heartbeat_event.wait()
fake_time = self.run_state.started_at + rdfvalue.Duration.From(
3, rdfvalue.HOURS)
with test_lib.FakeTime(fake_time):
self.HeartBeat()
self._TestHeartBeat(HeartbeatingOverruningCronJob, cron_started_event,
heartbeat_event)
def _TestHeartBeat(self, cron_class, cron_started_event, heartbeat_event):
"""Helper for heartbeat tests."""
cron_name = compatibility.GetName(cron_class)
cronjobs.ScheduleSystemCronJobs(names=[cron_name])
cron_manager = cronjobs.CronManager()
jobs = cronjobs.CronManager().ListJobs()
self.assertIn(cron_name, jobs)
try:
cron_manager.RunOnce()
cron_started_event.wait()
runs = cron_manager.ReadJobRuns(cron_name)
self.assertLen(runs, 1)
self.assertEqual(runs[0].status,
rdf_cronjobs.CronJobRun.CronJobRunStatus.RUNNING)
finally:
heartbeat_event.set()
cron_manager._GetThreadPool().Join()
runs = cron_manager.ReadJobRuns(cron_name)
self.assertLen(runs, 1)
if cron_class.allow_overruns:
expected_status = rdf_cronjobs.CronJobRun.CronJobRunStatus.FINISHED
else:
expected_status = (
rdf_cronjobs.CronJobRun.CronJobRunStatus.LIFETIME_EXCEEDED)
self.assertEqual(runs[0].status, expected_status)
@mock.patch.object(cronjobs, "_MAX_LOG_MESSAGES", 5)
def testLogging(self):
class LoggingCronJob(cronjobs.SystemCronJobBase):
lifetime = rdfvalue.Duration.From(1, rdfvalue.HOURS)
frequency = rdfvalue.Duration.From(2, rdfvalue.HOURS)
def Run(self):
for i in range(7):
self.Log("Log message %d." % i)
cron_name = compatibility.GetName(LoggingCronJob)
cronjobs.ScheduleSystemCronJobs(names=[cron_name])
cron_manager = cronjobs.CronManager()
try:
cron_manager.RunOnce()
finally:
cron_manager._GetThreadPool().Join()
runs = cron_manager.ReadJobRuns(cron_name)
self.assertLen(runs, 1)
self.assertEmpty(runs[0].backtrace)
self.assertEqual(runs[0].status,
rdf_cronjobs.CronJobRun.CronJobRunStatus.FINISHED)
# The first two log messages should be discarded since
# _MAX_LOG_MESSAGES is 5.
self.assertMultiLineEqual(
runs[0].log_message,
"Log message 6.\nLog message 5.\nLog message 4.\nLog message 3.\n"
"Log message 2.")
def main(argv):
test_lib.main(argv)
if __name__ == "__main__":
app.run(main)
| apache-2.0 |
notriddle/servo | components/script/dom/bindings/codegen/parser/tests/test_unforgeable.py | 3 | 7169 | def WebIDLTest(parser, harness):
parser.parse("""
interface Child : Parent {
};
interface Parent {
[Unforgeable] readonly attribute long foo;
};
""")
results = parser.finish()
harness.check(len(results), 2,
"Should be able to inherit from an interface with "
"[Unforgeable] properties.")
parser = parser.reset();
parser.parse("""
interface Child : Parent {
const short foo = 10;
};
interface Parent {
[Unforgeable] readonly attribute long foo;
};
""")
results = parser.finish()
harness.check(len(results), 2,
"Should be able to inherit from an interface with "
"[Unforgeable] properties even if we have a constant with "
"the same name.")
parser = parser.reset();
parser.parse("""
interface Child : Parent {
static attribute short foo;
};
interface Parent {
[Unforgeable] readonly attribute long foo;
};
""")
results = parser.finish()
harness.check(len(results), 2,
"Should be able to inherit from an interface with "
"[Unforgeable] properties even if we have a static attribute "
"with the same name.")
parser = parser.reset();
parser.parse("""
interface Child : Parent {
static void foo();
};
interface Parent {
[Unforgeable] readonly attribute long foo;
};
""")
results = parser.finish()
harness.check(len(results), 2,
"Should be able to inherit from an interface with "
"[Unforgeable] properties even if we have a static operation "
"with the same name.")
parser = parser.reset();
threw = False
try:
parser.parse("""
interface Child : Parent {
void foo();
};
interface Parent {
[Unforgeable] readonly attribute long foo;
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw,
"Should have thrown when shadowing unforgeable attribute on "
"parent with operation.")
parser = parser.reset();
threw = False
try:
parser.parse("""
interface Child : Parent {
void foo();
};
interface Parent {
[Unforgeable] void foo();
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw,
"Should have thrown when shadowing unforgeable operation on "
"parent with operation.")
parser = parser.reset();
threw = False
try:
parser.parse("""
interface Child : Parent {
attribute short foo;
};
interface Parent {
[Unforgeable] readonly attribute long foo;
};
""")
results = parser.finish()
except Exception as x:
threw = True
harness.ok(threw,
"Should have thrown when shadowing unforgeable attribute on "
"parent with attribute.")
parser = parser.reset();
threw = False
try:
parser.parse("""
interface Child : Parent {
attribute short foo;
};
interface Parent {
[Unforgeable] void foo();
};
""")
results = parser.finish()
except Exception as x:
threw = True
harness.ok(threw,
"Should have thrown when shadowing unforgeable operation on "
"parent with attribute.")
parser = parser.reset();
parser.parse("""
interface Child : Parent {
};
interface Parent {};
interface Consequential {
[Unforgeable] readonly attribute long foo;
};
Parent implements Consequential;
""")
results = parser.finish()
harness.check(len(results), 4,
"Should be able to inherit from an interface with a "
"consequential interface with [Unforgeable] properties.")
parser = parser.reset();
threw = False
try:
parser.parse("""
interface Child : Parent {
void foo();
};
interface Parent {};
interface Consequential {
[Unforgeable] readonly attribute long foo;
};
Parent implements Consequential;
""")
results = parser.finish()
except:
threw = True
harness.ok(threw,
"Should have thrown when shadowing unforgeable attribute "
"of parent's consequential interface.")
parser = parser.reset();
threw = False
try:
parser.parse("""
interface Child : Parent {
};
interface Parent : GrandParent {};
interface GrandParent {};
interface Consequential {
[Unforgeable] readonly attribute long foo;
};
GrandParent implements Consequential;
interface ChildConsequential {
void foo();
};
Child implements ChildConsequential;
""")
results = parser.finish()
except:
threw = True
harness.ok(threw,
"Should have thrown when our consequential interface shadows unforgeable attribute "
"of ancestor's consequential interface.")
parser = parser.reset();
threw = False
try:
parser.parse("""
interface Child : Parent {
};
interface Parent : GrandParent {};
interface GrandParent {};
interface Consequential {
[Unforgeable] void foo();
};
GrandParent implements Consequential;
interface ChildConsequential {
void foo();
};
Child implements ChildConsequential;
""")
results = parser.finish()
except:
threw = True
harness.ok(threw,
"Should have thrown when our consequential interface shadows unforgeable operation "
"of ancestor's consequential interface.")
parser = parser.reset();
parser.parse("""
interface iface {
[Unforgeable] attribute long foo;
};
""")
results = parser.finish()
harness.check(len(results), 1,
"Should allow writable [Unforgeable] attribute.")
parser = parser.reset();
threw = False
try:
parser.parse("""
interface iface {
[Unforgeable] static readonly attribute long foo;
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Should have thrown for static [Unforgeable] attribute.")
| mpl-2.0 |
atsuyim/readthedocs.org | readthedocs/rtd_tests/tests/test_views.py | 21 | 9286 | from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.utils.six.moves.urllib.parse import urlsplit
from django_dynamic_fixture import get
from django_dynamic_fixture import new
from readthedocs.builds.constants import LATEST
from readthedocs.projects.models import ImportedFile
from readthedocs.projects.models import Project
from readthedocs.projects.forms import UpdateProjectForm
from readthedocs.privacy.loader import AdminPermission
class Testmaker(TestCase):
def setUp(self):
self.eric = User(username='eric')
self.eric.set_password('test')
self.eric.save()
def test_imported_docs(self):
# Test Import
self.client.login(username='eric', password='test')
user = User.objects.get(username='eric')
r = self.client.get('/dashboard/', {})
self.assertEqual(r.status_code, 200)
r = self.client.get('/dashboard/import/manual/', {})
self.assertEqual(r.status_code, 200)
form = UpdateProjectForm(data={
'name': 'Django Kong',
'repo': 'https://github.com/ericholscher/django-kong',
'repo_type': 'git',
'description': 'OOHHH AH AH AH KONG SMASH',
'language': 'en',
'default_branch': '',
'project_url': 'http://django-kong.rtfd.org',
'default_version': LATEST,
'privacy_level': 'public',
'version_privacy_level': 'public',
'python_interpreter': 'python',
'documentation_type': 'sphinx',
'csrfmiddlewaretoken': '34af7c8a5ba84b84564403a280d9a9be',
}, user=user)
_ = form.save()
_ = Project.objects.get(slug='django-kong')
r = self.client.get('/docs/django-kong/en/latest/', {})
self.assertEqual(r.status_code, 200)
r = self.client.get('/dashboard/django-kong/versions/', {})
self.assertEqual(r.status_code, 200)
r = self.client.get('/projects/django-kong/builds/')
self.assertEqual(r.status_code, 200)
r = self.client.get('/dashboard/django-kong/edit/', {})
self.assertEqual(r.status_code, 200)
r = self.client.get('/dashboard/django-kong/subprojects/', {})
self.assertEqual(r.status_code, 200)
class PrivateViewsAreProtectedTests(TestCase):
fixtures = ['eric', 'test_data']
def assertRedirectToLogin(self, response):
self.assertEqual(response.status_code, 302)
url = response['Location']
e_scheme, e_netloc, e_path, e_query, e_fragment = urlsplit(url)
self.assertEqual(e_path, reverse('account_login'))
def test_dashboard(self):
response = self.client.get('/dashboard/')
self.assertRedirectToLogin(response)
def test_import_wizard_start(self):
response = self.client.get('/dashboard/import/')
self.assertRedirectToLogin(response)
def test_import_wizard_manual(self):
response = self.client.get('/dashboard/import/manual/')
self.assertRedirectToLogin(response)
def test_import_wizard_demo(self):
response = self.client.get('/dashboard/import/manual/demo/')
self.assertRedirectToLogin(response)
def test_import_github(self):
response = self.client.get('/dashboard/import/github/')
self.assertRedirectToLogin(response)
def test_import_bitbucket(self):
response = self.client.get('/dashboard/import/bitbucket/')
self.assertRedirectToLogin(response)
def test_projects_manage(self):
response = self.client.get('/dashboard/pip/')
self.assertRedirectToLogin(response)
def test_alias_manage(self):
response = self.client.get('/dashboard/pip/alias/')
self.assertRedirectToLogin(response)
def test_comments_moderation(self):
response = self.client.get('/dashboard/pip/comments_moderation/')
self.assertRedirectToLogin(response)
def test_edit(self):
response = self.client.get('/dashboard/pip/edit/')
self.assertRedirectToLogin(response)
def test_advanced(self):
response = self.client.get('/dashboard/pip/advanced/')
self.assertRedirectToLogin(response)
def test_version_delete_html(self):
response = self.client.get('/dashboard/pip/version/0.8.1/delete_html/')
self.assertRedirectToLogin(response)
def test_version_detail(self):
response = self.client.get('/dashboard/pip/version/0.8.1/')
self.assertRedirectToLogin(response)
def test_versions(self):
response = self.client.get('/dashboard/pip/versions/')
self.assertRedirectToLogin(response)
def test_project_delete(self):
response = self.client.get('/dashboard/pip/delete/')
self.assertRedirectToLogin(response)
def test_subprojects_delete(self):
response = self.client.get(
'/dashboard/pip/subprojects/delete/a-subproject/')
self.assertRedirectToLogin(response)
def test_subprojects(self):
response = self.client.get('/dashboard/pip/subprojects/')
self.assertRedirectToLogin(response)
def test_project_users(self):
response = self.client.get('/dashboard/pip/users/')
self.assertRedirectToLogin(response)
def test_project_users_delete(self):
response = self.client.get('/dashboard/pip/users/delete/')
self.assertRedirectToLogin(response)
def test_project_notifications(self):
response = self.client.get('/dashboard/pip/notifications/')
self.assertRedirectToLogin(response)
def test_project_comments(self):
response = self.client.get('/dashboard/pip/comments/')
self.assertRedirectToLogin(response)
def test_project_notifications_delete(self):
response = self.client.get('/dashboard/pip/notifications/delete/')
self.assertRedirectToLogin(response)
def test_project_translations(self):
response = self.client.get('/dashboard/pip/translations/')
self.assertRedirectToLogin(response)
def test_project_translations_delete(self):
response = self.client.get('/dashboard/pip/translations/delete/a-translation/')
self.assertRedirectToLogin(response)
def test_project_redirects(self):
response = self.client.get('/dashboard/pip/redirects/')
self.assertRedirectToLogin(response)
def test_project_redirects_delete(self):
response = self.client.get('/dashboard/pip/redirects/delete/')
self.assertRedirectToLogin(response)
class RandomPageTests(TestCase):
fixtures = ['eric', 'test_data']
def setUp(self):
self.pip = Project.objects.get(slug='pip')
self.pip_version = self.pip.versions.all()[0]
ImportedFile.objects.create(
project=self.pip,
version=self.pip_version,
name='File',
slug='file',
path='file.html',
md5='abcdef',
commit='1234567890abcdef')
def test_random_page_view_redirects(self):
response = self.client.get('/random/')
self.assertEqual(response.status_code, 302)
def test_takes_project_slug(self):
response = self.client.get('/random/pip/')
self.assertEqual(response.status_code, 302)
self.assertTrue('/pip/' in response['Location'])
def test_404_for_unknown_project(self):
response = self.client.get('/random/not-existent/')
self.assertEqual(response.status_code, 404)
def test_404_for_with_no_imported_files(self):
ImportedFile.objects.all().delete()
response = self.client.get('/random/pip/')
self.assertEqual(response.status_code, 404)
class SubprojectViewTests(TestCase):
def setUp(self):
self.user = new(User, username='test')
self.user.set_password('test')
self.user.save()
self.project = get(Project, slug='my-mainproject')
self.subproject = get(Project, slug='my-subproject')
self.project.add_subproject(self.subproject)
self.client.login(username='test', password='test')
def test_deny_delete_for_non_project_admins(self):
response = self.client.get('/dashboard/my-mainproject/subprojects/delete/my-subproject/')
self.assertEqual(response.status_code, 404)
self.assertTrue(self.subproject in [r.child for r in self.project.subprojects.all()])
def test_admins_can_delete_subprojects(self):
self.project.users.add(self.user)
self.subproject.users.add(self.user)
response = self.client.get('/dashboard/my-mainproject/subprojects/delete/my-subproject/')
self.assertEqual(response.status_code, 302)
self.assertTrue(self.subproject not in [r.child for r in self.project.subprojects.all()])
def test_project_admins_can_delete_subprojects_that_they_are_not_admin_of(self):
self.project.users.add(self.user)
self.assertFalse(AdminPermission.is_admin(self.user, self.subproject))
response = self.client.get('/dashboard/my-mainproject/subprojects/delete/my-subproject/')
self.assertEqual(response.status_code, 302)
self.assertTrue(self.subproject not in [r.child for r in self.project.subprojects.all()])
| mit |
jlegendary/scikit-learn | examples/plot_multilabel.py | 87 | 4279 | # Authors: Vlad Niculae, Mathieu Blondel
# License: BSD 3 clause
"""
=========================
Multilabel classification
=========================
This example simulates a multi-label document classification problem. The
dataset is generated randomly based on the following process:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that n is
never zero or more than 2, and that the document length is never zero.
Likewise, we reject classes which have already been chosen.
classes are plotted surrounded by two colored circles.
The classification is performed by projecting to the first two principal
components found by PCA and CCA for visualisation purposes, followed by using
the :class:`sklearn.multiclass.OneVsRestClassifier` metaclassifier using two
SVCs with linear kernels to learn a discriminative model for each class.
Note that PCA is used to perform an unsupervised dimensionality reduction,
while CCA is used to perform a supervised one.
Note: in the plot, "unlabeled samples" does not mean that we don't know the
labels (as in semi-supervised learning) but that the samples simply do *not*
have a label.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_multilabel_classification
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import SVC
from sklearn.preprocessing import LabelBinarizer
from sklearn.decomposition import PCA
from sklearn.cross_decomposition import CCA
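# Illustrative sketch (added for clarity, not part of the original example):
# the generative process from the module docstring, written out explicitly.
# The parameter names (n_labels, length, theta, theta_c) are assumed for
# illustration only; the plotting code below does not use this function.
def _simulate_document(rng, n_labels, length, theta, theta_c, n_classes=2):
    # Pick the number of labels, rejecting n == 0 or n > n_classes.
    n = 0
    while n < 1 or n > n_classes:
        n = rng.poisson(n_labels)
    # Choose n distinct classes, c ~ Multinomial(theta), rejecting repeats.
    classes = set()
    while len(classes) < n:
        classes.add(int(rng.choice(n_classes, p=theta)))
    # Pick a non-zero document length, k ~ Poisson(length).
    k = 0
    while k == 0:
        k = rng.poisson(length)
    # Choose k words, each w ~ Multinomial(theta_c) of a sampled class.
    words = [int(rng.choice(len(theta_c[c]), p=theta_c[c]))
             for c in rng.choice(sorted(classes), size=k)]
    return sorted(classes), words
# e.g. _simulate_document(np.random.RandomState(0), n_labels=1, length=50,
#                         theta=[0.5, 0.5], theta_c=[[0.7, 0.3], [0.2, 0.8]])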
def plot_hyperplane(clf, min_x, max_x, linestyle, label):
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(min_x - 5, max_x + 5) # make sure the line is long enough
yy = a * xx - (clf.intercept_[0]) / w[1]
plt.plot(xx, yy, linestyle, label=label)
def plot_subfigure(X, Y, subplot, title, transform):
if transform == "pca":
X = PCA(n_components=2).fit_transform(X)
elif transform == "cca":
X = CCA(n_components=2).fit(X, Y).transform(X)
else:
raise ValueError
min_x = np.min(X[:, 0])
max_x = np.max(X[:, 0])
min_y = np.min(X[:, 1])
max_y = np.max(X[:, 1])
classif = OneVsRestClassifier(SVC(kernel='linear'))
classif.fit(X, Y)
plt.subplot(2, 2, subplot)
plt.title(title)
zero_class = np.where(Y[:, 0])
one_class = np.where(Y[:, 1])
plt.scatter(X[:, 0], X[:, 1], s=40, c='gray')
plt.scatter(X[zero_class, 0], X[zero_class, 1], s=160, edgecolors='b',
facecolors='none', linewidths=2, label='Class 1')
plt.scatter(X[one_class, 0], X[one_class, 1], s=80, edgecolors='orange',
facecolors='none', linewidths=2, label='Class 2')
plot_hyperplane(classif.estimators_[0], min_x, max_x, 'k--',
'Boundary\nfor class 1')
plot_hyperplane(classif.estimators_[1], min_x, max_x, 'k-.',
'Boundary\nfor class 2')
plt.xticks(())
plt.yticks(())
plt.xlim(min_x - .5 * max_x, max_x + .5 * max_x)
plt.ylim(min_y - .5 * max_y, max_y + .5 * max_y)
if subplot == 2:
plt.xlabel('First principal component')
plt.ylabel('Second principal component')
plt.legend(loc="upper left")
plt.figure(figsize=(8, 6))
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=True,
return_indicator=True,
random_state=1)
plot_subfigure(X, Y, 1, "With unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 2, "With unlabeled samples + PCA", "pca")
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
return_indicator=True,
random_state=1)
plot_subfigure(X, Y, 3, "Without unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 4, "Without unlabeled samples + PCA", "pca")
plt.subplots_adjust(.04, .02, .97, .94, .09, .2)
plt.show()
| bsd-3-clause |
40223112/2015cd_midterm | static/Brython3.1.0-20150301-090019/Lib/collections/abc.py | 739 | 16026 | # Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Abstract Base Classes (ABCs) for collections, according to PEP 3119.
Unit tests are in test_collections.
"""
from abc import ABCMeta, abstractmethod
import sys
__all__ = ["Hashable", "Iterable", "Iterator",
"Sized", "Container", "Callable",
"Set", "MutableSet",
"Mapping", "MutableMapping",
"MappingView", "KeysView", "ItemsView", "ValuesView",
"Sequence", "MutableSequence",
"ByteString",
]
# Private list of types that we want to register with the various ABCs
# so that they will pass tests like:
# it = iter(somebytearray)
# assert isinstance(it, Iterable)
# Note: in other implementations, these types may not be distinct
# and they may have their own implementation-specific types that
# are not included on this list.
bytes_iterator = type(iter(b''))
bytearray_iterator = type(iter(bytearray()))
#callable_iterator = ???
dict_keyiterator = type(iter({}.keys()))
dict_valueiterator = type(iter({}.values()))
dict_itemiterator = type(iter({}.items()))
list_iterator = type(iter([]))
list_reverseiterator = type(iter(reversed([])))
range_iterator = type(iter(range(0)))
set_iterator = type(iter(set()))
str_iterator = type(iter(""))
tuple_iterator = type(iter(()))
zip_iterator = type(iter(zip()))
## views ##
dict_keys = type({}.keys())
dict_values = type({}.values())
dict_items = type({}.items())
## misc ##
mappingproxy = type(type.__dict__)
### ONE-TRICK PONIES ###
class Hashable(metaclass=ABCMeta):
__slots__ = ()
@abstractmethod
def __hash__(self):
return 0
@classmethod
def __subclasshook__(cls, C):
if cls is Hashable:
for B in C.__mro__:
if "__hash__" in B.__dict__:
if B.__dict__["__hash__"]:
return True
break
return NotImplemented
class Iterable(metaclass=ABCMeta):
__slots__ = ()
@abstractmethod
def __iter__(self):
while False:
yield None
@classmethod
def __subclasshook__(cls, C):
if cls is Iterable:
if any("__iter__" in B.__dict__ for B in C.__mro__):
return True
return NotImplemented
class Iterator(Iterable):
__slots__ = ()
@abstractmethod
def __next__(self):
raise StopIteration
def __iter__(self):
return self
@classmethod
def __subclasshook__(cls, C):
if cls is Iterator:
if (any("__next__" in B.__dict__ for B in C.__mro__) and
any("__iter__" in B.__dict__ for B in C.__mro__)):
return True
return NotImplemented
Iterator.register(bytes_iterator)
Iterator.register(bytearray_iterator)
#Iterator.register(callable_iterator)
Iterator.register(dict_keyiterator)
Iterator.register(dict_valueiterator)
Iterator.register(dict_itemiterator)
Iterator.register(list_iterator)
Iterator.register(list_reverseiterator)
Iterator.register(range_iterator)
Iterator.register(set_iterator)
Iterator.register(str_iterator)
Iterator.register(tuple_iterator)
Iterator.register(zip_iterator)
class Sized(metaclass=ABCMeta):
__slots__ = ()
@abstractmethod
def __len__(self):
return 0
@classmethod
def __subclasshook__(cls, C):
if cls is Sized:
if any("__len__" in B.__dict__ for B in C.__mro__):
return True
return NotImplemented
class Container(metaclass=ABCMeta):
__slots__ = ()
@abstractmethod
def __contains__(self, x):
return False
@classmethod
def __subclasshook__(cls, C):
if cls is Container:
if any("__contains__" in B.__dict__ for B in C.__mro__):
return True
return NotImplemented
class Callable(metaclass=ABCMeta):
__slots__ = ()
@abstractmethod
def __call__(self, *args, **kwds):
return False
@classmethod
def __subclasshook__(cls, C):
if cls is Callable:
if any("__call__" in B.__dict__ for B in C.__mro__):
return True
return NotImplemented
### SETS ###
class Set(Sized, Iterable, Container):
"""A set is a finite, iterable container.
This class provides concrete generic implementations of all
methods except for __contains__, __iter__ and __len__.
To override the comparisons (presumably for speed, as the
semantics are fixed), all you have to do is redefine __le__ and
then the other operations will automatically follow suit.
"""
__slots__ = ()
def __le__(self, other):
if not isinstance(other, Set):
return NotImplemented
if len(self) > len(other):
return False
for elem in self:
if elem not in other:
return False
return True
def __lt__(self, other):
if not isinstance(other, Set):
return NotImplemented
return len(self) < len(other) and self.__le__(other)
def __gt__(self, other):
if not isinstance(other, Set):
return NotImplemented
return other < self
def __ge__(self, other):
if not isinstance(other, Set):
return NotImplemented
return other <= self
def __eq__(self, other):
if not isinstance(other, Set):
return NotImplemented
return len(self) == len(other) and self.__le__(other)
def __ne__(self, other):
return not (self == other)
@classmethod
def _from_iterable(cls, it):
'''Construct an instance of the class from any iterable input.
Must override this method if the class constructor signature
does not accept an iterable for an input.
'''
return cls(it)
def __and__(self, other):
if not isinstance(other, Iterable):
return NotImplemented
return self._from_iterable(value for value in other if value in self)
def isdisjoint(self, other):
for value in other:
if value in self:
return False
return True
def __or__(self, other):
if not isinstance(other, Iterable):
return NotImplemented
chain = (e for s in (self, other) for e in s)
return self._from_iterable(chain)
def __sub__(self, other):
if not isinstance(other, Set):
if not isinstance(other, Iterable):
return NotImplemented
other = self._from_iterable(other)
return self._from_iterable(value for value in self
if value not in other)
def __xor__(self, other):
if not isinstance(other, Set):
if not isinstance(other, Iterable):
return NotImplemented
other = self._from_iterable(other)
return (self - other) | (other - self)
def _hash(self):
"""Compute the hash value of a set.
Note that we don't define __hash__: not all sets are hashable.
But if you define a hashable set type, its __hash__ should
call this function.
        This must be compatible with __eq__.
All sets ought to compare equal if they contain the same
elements, regardless of how they are implemented, and
regardless of the order of the elements; so there's not much
freedom for __eq__ or __hash__. We match the algorithm used
by the built-in frozenset type.
"""
MAX = sys.maxsize
MASK = 2 * MAX + 1
n = len(self)
h = 1927868237 * (n + 1)
h &= MASK
for x in self:
hx = hash(x)
h ^= (hx ^ (hx << 16) ^ 89869747) * 3644798167
h &= MASK
h = h * 69069 + 907133923
h &= MASK
if h > MAX:
h -= MASK + 1
if h == -1:
h = 590923713
return h
Set.register(frozenset)
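# Example (illustrative, adapted from the collections.abc documentation and
# not part of this module's public API): a concrete Set only has to supply
# __contains__, __iter__ and __len__; the comparisons and set operators are
# inherited from the mixin methods above.
class _ExampleListBasedSet(Set):
    """Alternate set implementation favoring space over speed
    and not requiring the set elements to be hashable.
    """
    def __init__(self, iterable):
        self.elements = lst = []
        for value in iterable:
            if value not in lst:
                lst.append(value)
    def __iter__(self):
        return iter(self.elements)
    def __contains__(self, value):
        return value in self.elements
    def __len__(self):
        return len(self.elements)
# e.g. _ExampleListBasedSet('abcdef') & _ExampleListBasedSet('defghi')
# builds a new instance via the inherited __and__ and _from_iterable.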
class MutableSet(Set):
__slots__ = ()
@abstractmethod
def add(self, value):
"""Add an element."""
raise NotImplementedError
@abstractmethod
def discard(self, value):
"""Remove an element. Do not raise an exception if absent."""
raise NotImplementedError
def remove(self, value):
"""Remove an element. If not a member, raise a KeyError."""
if value not in self:
raise KeyError(value)
self.discard(value)
def pop(self):
"""Return the popped value. Raise KeyError if empty."""
it = iter(self)
try:
value = next(it)
except StopIteration:
raise KeyError
self.discard(value)
return value
def clear(self):
"""This is slow (creates N new iterators!) but effective."""
try:
while True:
self.pop()
except KeyError:
pass
def __ior__(self, it):
for value in it:
self.add(value)
return self
def __iand__(self, it):
for value in (self - it):
self.discard(value)
return self
def __ixor__(self, it):
if it is self:
self.clear()
else:
if not isinstance(it, Set):
it = self._from_iterable(it)
for value in it:
if value in self:
self.discard(value)
else:
self.add(value)
return self
def __isub__(self, it):
if it is self:
self.clear()
else:
for value in it:
self.discard(value)
return self
MutableSet.register(set)
### MAPPINGS ###
class Mapping(Sized, Iterable, Container):
__slots__ = ()
@abstractmethod
def __getitem__(self, key):
raise KeyError
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def __contains__(self, key):
try:
self[key]
except KeyError:
return False
else:
return True
def keys(self):
return KeysView(self)
def items(self):
return ItemsView(self)
def values(self):
return ValuesView(self)
def __eq__(self, other):
if not isinstance(other, Mapping):
return NotImplemented
return dict(self.items()) == dict(other.items())
def __ne__(self, other):
return not (self == other)
Mapping.register(mappingproxy)
class MappingView(Sized):
def __init__(self, mapping):
self._mapping = mapping
def __len__(self):
return len(self._mapping)
def __repr__(self):
return '{0.__class__.__name__}({0._mapping!r})'.format(self)
class KeysView(MappingView, Set):
@classmethod
def _from_iterable(self, it):
return set(it)
def __contains__(self, key):
return key in self._mapping
def __iter__(self):
for key in self._mapping:
yield key
KeysView.register(dict_keys)
class ItemsView(MappingView, Set):
@classmethod
def _from_iterable(self, it):
return set(it)
def __contains__(self, item):
key, value = item
try:
v = self._mapping[key]
except KeyError:
return False
else:
return v == value
def __iter__(self):
for key in self._mapping:
yield (key, self._mapping[key])
ItemsView.register(dict_items)
class ValuesView(MappingView):
def __contains__(self, value):
for key in self._mapping:
if value == self._mapping[key]:
return True
return False
def __iter__(self):
for key in self._mapping:
yield self._mapping[key]
ValuesView.register(dict_values)
class MutableMapping(Mapping):
__slots__ = ()
@abstractmethod
def __setitem__(self, key, value):
raise KeyError
@abstractmethod
def __delitem__(self, key):
raise KeyError
__marker = object()
def pop(self, key, default=__marker):
try:
value = self[key]
except KeyError:
if default is self.__marker:
raise
return default
else:
del self[key]
return value
def popitem(self):
try:
key = next(iter(self))
except StopIteration:
raise KeyError
value = self[key]
del self[key]
return key, value
def clear(self):
try:
while True:
self.popitem()
except KeyError:
pass
def update(*args, **kwds):
if len(args) > 2:
raise TypeError("update() takes at most 2 positional "
"arguments ({} given)".format(len(args)))
elif not args:
raise TypeError("update() takes at least 1 argument (0 given)")
self = args[0]
other = args[1] if len(args) >= 2 else ()
if isinstance(other, Mapping):
for key in other:
self[key] = other[key]
elif hasattr(other, "keys"):
for key in other.keys():
self[key] = other[key]
else:
for key, value in other:
self[key] = value
for key, value in kwds.items():
self[key] = value
def setdefault(self, key, default=None):
try:
return self[key]
except KeyError:
self[key] = default
return default
MutableMapping.register(dict)
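# Example (illustrative, not part of this module's public API): a concrete
# MutableMapping only needs the five methods below; get(), pop(), popitem(),
# setdefault(), update() and clear() are all inherited from the mixins above.
class _ExampleDictBackedMapping(MutableMapping):
    def __init__(self, *args, **kwds):
        self._data = {}
        self.update(*args, **kwds)
    def __getitem__(self, key):
        return self._data[key]
    def __setitem__(self, key, value):
        self._data[key] = value
    def __delitem__(self, key):
        del self._data[key]
    def __iter__(self):
        return iter(self._data)
    def __len__(self):
        return len(self._data)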
### SEQUENCES ###
class Sequence(Sized, Iterable, Container):
"""All the operations on a read-only sequence.
Concrete subclasses must override __new__ or __init__,
__getitem__, and __len__.
"""
__slots__ = ()
@abstractmethod
def __getitem__(self, index):
raise IndexError
def __iter__(self):
i = 0
try:
while True:
v = self[i]
yield v
i += 1
except IndexError:
return
def __contains__(self, value):
for v in self:
if v == value:
return True
return False
def __reversed__(self):
for i in reversed(range(len(self))):
yield self[i]
def index(self, value):
for i, v in enumerate(self):
if v == value:
return i
raise ValueError
def count(self, value):
return sum(1 for v in self if v == value)
Sequence.register(tuple)
Sequence.register(str)
Sequence.register(range)
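# --- Illustrative sketch (not part of the original module); the class
# name RepeatedValue is hypothetical.  Per the docstring above, a
# concrete Sequence only overrides __init__, __getitem__, and __len__;
# iteration, membership, reversed(), index(), and count() then follow.
class RepeatedValue(Sequence):
    """A read-only sequence holding the same value `times` times."""
    def __init__(self, value, times):
        self._value, self._times = value, times
    def __getitem__(self, index):
        # Sketch only: negative indices and slices are not supported.
        if not 0 <= index < self._times:
            raise IndexError(index)
        return self._value
    def __len__(self):
        return self._times
# Usage: r = RepeatedValue('x', 3); list(r) == ['x', 'x', 'x']; r.count('x') == 3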
class ByteString(Sequence):
"""This unifies bytes and bytearray.
XXX Should add all their methods.
"""
__slots__ = ()
ByteString.register(bytes)
ByteString.register(bytearray)
class MutableSequence(Sequence):
__slots__ = ()
@abstractmethod
def __setitem__(self, index, value):
raise IndexError
@abstractmethod
def __delitem__(self, index):
raise IndexError
@abstractmethod
def insert(self, index, value):
raise IndexError
def append(self, value):
self.insert(len(self), value)
def clear(self):
try:
while True:
self.pop()
except IndexError:
pass
def reverse(self):
n = len(self)
for i in range(n//2):
self[i], self[n-i-1] = self[n-i-1], self[i]
def extend(self, values):
for v in values:
self.append(v)
def pop(self, index=-1):
v = self[index]
del self[index]
return v
def remove(self, value):
del self[self.index(value)]
def __iadd__(self, values):
self.extend(values)
return self
MutableSequence.register(list)
MutableSequence.register(bytearray) # Multiply inheriting, see ByteString
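# --- Illustrative sketch (not part of the original module); the class
# name WrappedList is hypothetical.  Implementing __getitem__,
# __setitem__, __delitem__, __len__, and insert is enough for append,
# extend, pop, remove, reverse, clear, and += to come from the mixins.
class WrappedList(MutableSequence):
    def __init__(self, iterable=()):
        self._items = list(iterable)
    def __getitem__(self, index):
        return self._items[index]
    def __setitem__(self, index, value):
        self._items[index] = value
    def __delitem__(self, index):
        del self._items[index]
    def __len__(self):
        return len(self._items)
    def insert(self, index, value):
        self._items.insert(index, value)
# Usage: w = WrappedList([1, 2]); w.append(3); w.remove(1); list(w) == [2, 3]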
| gpl-3.0 |
dhalleine/tensorflow | tensorflow/python/kernel_tests/matmul_op_test.py | 3 | 12431 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.math_ops.matmul."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
class MatMulTest(tf.test.TestCase):
def assertAllCloseAccordingToType(self, a, b, rtol=1e-6, atol=1e-6):
"""Like test_util.assertAllCloseToType, but with looser fp16 limits.
With matrix multiplication, many values are summed, compounding
accuracy issues. Thus, we set fp16 tolerance to 1e-2 instead of 1e-6.
(This primarily affects the CPU versions, which accumulate in fp16;
the CUDA versions currently use fp32 math internally.)
Args:
      a: a numpy ndarray or anything that can be converted to one.
      b: a numpy ndarray or anything that can be converted to one.
rtol: relative tolerance
atol: absolute tolerance
"""
a = self._GetNdArray(a)
b = self._GetNdArray(b)
if a.dtype == np.float16 or b.dtype == np.float16:
rtol = max(rtol, 1e-2)
atol = max(atol, 1e-2)
self.assertAllClose(a, b, rtol=rtol, atol=atol)
def _testCpuMatmul(self, x, y, transpose_x=False, transpose_y=False):
x_mat = np.matrix(x).T if transpose_x else np.matrix(x)
y_mat = np.matrix(y).T if transpose_y else np.matrix(y)
np_ans = x_mat * y_mat
with self.test_session(use_gpu=False):
tf_ans = tf.matmul(x, y, transpose_x, transpose_y).eval()
self.assertAllCloseAccordingToType(np_ans, tf_ans)
self.assertAllEqual(np_ans.shape, tf_ans.shape)
def _testGpuMatmul(self, x, y, transpose_x=False, transpose_y=False):
x_mat = np.matrix(x).T if transpose_x else np.matrix(x)
y_mat = np.matrix(y).T if transpose_y else np.matrix(y)
np_ans = x_mat * y_mat
with self.test_session(use_gpu=True):
tf_ans = tf.matmul(x, y, transpose_x, transpose_y).eval()
self.assertAllCloseAccordingToType(np_ans, tf_ans)
self.assertAllEqual(np_ans.shape, tf_ans.shape)
def _randMatrix(self, rows, cols, dtype):
if dtype in (np.complex64, np.complex128):
if dtype == np.complex64:
float_dtype = np.float32
else:
float_dtype = np.float64
real = self._randMatrix(rows, cols, float_dtype)
imag = self._randMatrix(rows, cols, float_dtype)
return real + 1j * imag
else:
return np.random.uniform(low=1.0, high=100.0, size=rows * cols).reshape(
[rows, cols]).astype(dtype)
# Basic test:
# [ [1],
# [2],
# [3], * [1, 2]
# [4] ]
def testFloatBasic(self):
x = np.arange(1., 5.).reshape([4, 1]).astype(np.float32)
y = np.arange(1., 3.).reshape([1, 2]).astype(np.float32)
self._testCpuMatmul(x, y)
self._testGpuMatmul(x, y)
def testDoubleBasic(self):
x = np.arange(1., 5.).reshape([4, 1]).astype(np.float64)
y = np.arange(1., 3.).reshape([1, 2]).astype(np.float64)
self._testCpuMatmul(x, y)
self._testGpuMatmul(x, y)
def testHalfBasic(self):
x = np.arange(1., 5.).reshape([4, 1]).astype(np.float16)
y = np.arange(1., 3.).reshape([1, 2]).astype(np.float16)
self._testCpuMatmul(x, y)
if test_util.CudaSupportsHalfMatMulAndConv():
self._testGpuMatmul(x, y)
else:
print("Built without fp16 matmul support, skipping GPU test.")
def testInt32Basic(self):
x = np.arange(1., 5.).reshape([4, 1]).astype(np.int32)
y = np.arange(1., 3.).reshape([1, 2]).astype(np.int32)
self._testCpuMatmul(x, y)
def testComplex64Basic(self):
x = np.arange(1., 5.).reshape([4, 1]).astype(np.complex64)
y = np.arange(1., 3.).reshape([1, 2]).astype(np.complex64)
self._testCpuMatmul(x, y)
def testComplex128Basic(self):
x = np.arange(1., 5.).reshape([4, 1]).astype(np.complex128)
y = np.arange(1., 3.).reshape([1, 2]).astype(np.complex128)
self._testCpuMatmul(x, y)
  # Tests with random-sized matrices.
def testFloatRandom(self):
for _ in range(10):
n, k, m = np.random.randint(1, 100, size=3)
x = self._randMatrix(n, k, np.float32)
y = self._randMatrix(k, m, np.float32)
self._testCpuMatmul(x, y)
self._testGpuMatmul(x, y)
def testDoubleRandom(self):
for _ in range(10):
n, k, m = np.random.randint(1, 100, size=3)
x = self._randMatrix(n, k, np.float64)
y = self._randMatrix(k, m, np.float64)
self._testCpuMatmul(x, y)
self._testGpuMatmul(x, y)
def testHalfRandom(self):
for _ in range(10):
n, k, m = np.random.randint(1, 10, size=3) # Smaller range than float.
x = self._randMatrix(n, k, np.float16)
y = self._randMatrix(k, m, np.float16)
self._testCpuMatmul(x, y)
if test_util.CudaSupportsHalfMatMulAndConv():
self._testGpuMatmul(x, y)
else:
print("Built without fp16 matmul support, skipping GPU test.")
def testInt32Random(self):
for _ in range(10):
n, k, m = np.random.randint(1, 100, size=3)
x = self._randMatrix(n, k, np.int32)
y = self._randMatrix(k, m, np.int32)
self._testCpuMatmul(x, y)
def testComplex64Random(self):
for _ in range(10):
n, k, m = np.random.randint(1, 10, size=3) # Smaller range than float
x = self._randMatrix(n, k, np.complex64)
y = self._randMatrix(k, m, np.complex64)
self._testCpuMatmul(x, y)
def testComplex128Random(self):
for _ in range(10):
n, k, m = np.random.randint(1, 10, size=3) # Smaller range than float
x = self._randMatrix(n, k, np.complex128)
y = self._randMatrix(k, m, np.complex128)
self._testCpuMatmul(x, y)
# Test the cases that transpose the matrices before multiplying.
# NOTE(keveman): The cases where only one of the inputs is
# transposed are covered by tf.matmul's gradient function.
def testFloatRandomTransposeBoth(self):
for _ in range(10):
n, k, m = np.random.randint(1, 100, size=3)
x = self._randMatrix(k, n, np.float32)
y = self._randMatrix(m, k, np.float32)
self._testCpuMatmul(x, y, True, True)
self._testGpuMatmul(x, y, True, True)
def testDoubleRandomTransposeBoth(self):
for _ in range(10):
n, k, m = np.random.randint(1, 100, size=3)
x = self._randMatrix(k, n, np.float64)
y = self._randMatrix(m, k, np.float64)
self._testCpuMatmul(x, y, True, True)
self._testGpuMatmul(x, y, True, True)
def testHalfRandomTransposeBoth(self):
for _ in range(10):
n, k, m = np.random.randint(1, 10, size=3) # Smaller range than float.
x = self._randMatrix(k, n, np.float16)
y = self._randMatrix(m, k, np.float16)
self._testCpuMatmul(x, y, True, True)
if test_util.CudaSupportsHalfMatMulAndConv():
self._testGpuMatmul(x, y, True, True)
else:
print("Built without fp16 matmul support, skipping GPU test.")
def testMatMul_OutEmpty_A(self):
n, k, m = 0, 8, 3
x = self._randMatrix(n, k, np.float32)
y = self._randMatrix(k, m, np.float32)
self._testCpuMatmul(x, y)
self._testGpuMatmul(x, y)
def testMatMul_OutEmpty_B(self):
n, k, m = 3, 8, 0
x = self._randMatrix(n, k, np.float32)
y = self._randMatrix(k, m, np.float32)
self._testCpuMatmul(x, y)
self._testGpuMatmul(x, y)
def testMatMul_Inputs_Empty(self):
n, k, m = 3, 0, 4
x = self._randMatrix(n, k, np.float32)
y = self._randMatrix(k, m, np.float32)
self._testCpuMatmul(x, y)
self._testGpuMatmul(x, y)
def testShapeErrors(self):
a = tf.placeholder(tf.float32, [32, 37])
b = tf.placeholder(tf.float32, [36, 2])
c = tf.placeholder(tf.float32, [37])
with self.assertRaisesRegexp(
ValueError, "Dimensions 37 and 36 are not compatible"):
tf.matmul(a, b)
with self.assertRaisesRegexp(ValueError, "must have rank 2"):
tf.matmul(a, c)
# TODO(zhifengc): Figure out how to test matmul gradients on GPU.
class MatMulGradientTest(tf.test.TestCase):
def testGradientInput0(self):
with self.test_session(use_gpu=False):
x = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[3, 2],
dtype=tf.float64, name="x")
y = tf.constant([1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7],
shape=[2, 4], dtype=tf.float64, name="y")
m = tf.matmul(x, y, name="matmul")
err = tf.test.compute_gradient_error(x, [3, 2], m, [3, 4])
print("matmul input0 gradient err = ", err)
self.assertLess(err, 1e-10)
def testGradientInput1(self):
with self.test_session(use_gpu=False):
x = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[3, 2],
dtype=tf.float64, name="x")
y = tf.constant([1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7],
shape=[2, 4], dtype=tf.float64, name="y")
m = tf.matmul(x, y, name="matmul")
err = tf.test.compute_gradient_error(y, [2, 4], m, [3, 4])
print("matmul input1 gradient err = ", err)
self.assertLess(err, 1e-10)
def _VerifyInput0(self, transpose_a, transpose_b):
shape_x = [3, 2]
shape_y = [2, 4]
if transpose_a:
shape_x = list(reversed(shape_x))
if transpose_b:
shape_y = list(reversed(shape_y))
with self.test_session(use_gpu=False):
x = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=shape_x,
dtype=tf.float64, name="x")
y = tf.constant([1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7],
shape=shape_y, dtype=tf.float64, name="y")
m = tf.matmul(x, y, transpose_a, transpose_b, name="matmul")
err = tf.test.compute_gradient_error(x, shape_x, m, [3, 4])
print("matmul input0 gradient err = ", err)
self.assertLess(err, 1e-10)
def testGradientInput0WithTranspose(self):
self._VerifyInput0(transpose_a=True, transpose_b=False)
self._VerifyInput0(transpose_a=False, transpose_b=True)
self._VerifyInput0(transpose_a=True, transpose_b=True)
def _VerifyInput1(self, transpose_a, transpose_b):
shape_x = [3, 2]
shape_y = [2, 4]
if transpose_a:
shape_x = list(reversed(shape_x))
if transpose_b:
shape_y = list(reversed(shape_y))
with self.test_session(use_gpu=False):
x = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=shape_x,
dtype=tf.float64, name="x")
y = tf.constant([1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7],
shape=shape_y, dtype=tf.float64, name="y")
m = tf.matmul(x, y, transpose_a, transpose_b, name="matmul")
err = tf.test.compute_gradient_error(y, shape_y, m, [3, 4])
print("matmul input1 gradient err = ", err)
self.assertLess(err, 1e-10)
def testGradientInput1WithTranspose(self):
self._VerifyInput1(transpose_a=True, transpose_b=False)
self._VerifyInput1(transpose_a=False, transpose_b=True)
self._VerifyInput1(transpose_a=True, transpose_b=True)
class MatMulStatsTest(tf.test.TestCase):
def testSimpleStatistics(self):
g = tf.Graph()
with g.as_default():
a = tf.Variable(tf.random_normal([25, 16]))
b = tf.Variable(tf.random_normal([16, 9]))
tf.matmul(a, b)
for op in g.get_operations():
flops = ops.get_stats_for_node_def(g, op.node_def, "flops").value
if op.name == "MatMul":
self.assertEqual(7200, flops)
def testTransposedStatistics(self):
g = tf.Graph()
with g.as_default():
a = tf.Variable(tf.random_normal([16, 25]))
b = tf.Variable(tf.random_normal([16, 9]))
tf.matmul(a, b, transpose_a=True)
for op in g.get_operations():
flops = ops.get_stats_for_node_def(g, op.node_def, "flops").value
if op.name == "MatMul":
self.assertEqual(7200, flops)
if __name__ == "__main__":
tf.test.main()
| apache-2.0 |
IronLanguages/ironpython2 | Src/StdLib/Lib/bisect.py | 1261 | 2595 | """Bisection algorithms."""
def insort_right(a, x, lo=0, hi=None):
"""Insert item x in list a, and keep it sorted assuming a is sorted.
If x is already in a, insert it to the right of the rightmost x.
Optional args lo (default 0) and hi (default len(a)) bound the
slice of a to be searched.
"""
if lo < 0:
raise ValueError('lo must be non-negative')
if hi is None:
hi = len(a)
while lo < hi:
mid = (lo+hi)//2
if x < a[mid]: hi = mid
else: lo = mid+1
a.insert(lo, x)
insort = insort_right # backward compatibility
def bisect_right(a, x, lo=0, hi=None):
"""Return the index where to insert item x in list a, assuming a is sorted.
The return value i is such that all e in a[:i] have e <= x, and all e in
    a[i:] have e > x. So if x already appears in the list, a.insert(i, x) will
insert just after the rightmost x already there.
Optional args lo (default 0) and hi (default len(a)) bound the
slice of a to be searched.
"""
if lo < 0:
raise ValueError('lo must be non-negative')
if hi is None:
hi = len(a)
while lo < hi:
mid = (lo+hi)//2
if x < a[mid]: hi = mid
else: lo = mid+1
return lo
bisect = bisect_right # backward compatibility
def insort_left(a, x, lo=0, hi=None):
"""Insert item x in list a, and keep it sorted assuming a is sorted.
If x is already in a, insert it to the left of the leftmost x.
Optional args lo (default 0) and hi (default len(a)) bound the
slice of a to be searched.
"""
if lo < 0:
raise ValueError('lo must be non-negative')
if hi is None:
hi = len(a)
while lo < hi:
mid = (lo+hi)//2
if a[mid] < x: lo = mid+1
else: hi = mid
a.insert(lo, x)
def bisect_left(a, x, lo=0, hi=None):
"""Return the index where to insert item x in list a, assuming a is sorted.
The return value i is such that all e in a[:i] have e < x, and all e in
    a[i:] have e >= x. So if x already appears in the list, a.insert(i, x) will
insert just before the leftmost x already there.
Optional args lo (default 0) and hi (default len(a)) bound the
slice of a to be searched.
"""
if lo < 0:
raise ValueError('lo must be non-negative')
if hi is None:
hi = len(a)
while lo < hi:
mid = (lo+hi)//2
if a[mid] < x: lo = mid+1
else: hi = mid
return lo
# Overwrite above definitions with a fast C implementation
try:
from _bisect import *
except ImportError:
pass
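# --- Illustrative usage sketch (not part of the original module) ---
# insort keeps a list sorted as items arrive; bisect_left/bisect_right
# then give O(log n) insertion points in the sorted list.
if __name__ == '__main__':
    grades = []
    for score in (75, 90, 60, 90):
        insort(grades, score)              # grades stays sorted
    assert grades == [60, 75, 90, 90]
    assert bisect_left(grades, 90) == 2    # leftmost position for 90
    assert bisect_right(grades, 90) == 4   # just past the rightmost 90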
| apache-2.0 |